hexsha (stringlengths 40..40) | size (int64 6..14.9M) | ext (stringclasses 1 value) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 6..260) | max_stars_repo_name (stringlengths 6..119) | max_stars_repo_head_hexsha (stringlengths 40..41) | max_stars_repo_licenses (list) | max_stars_count (int64 1..191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24..24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24..24 ⌀) | max_issues_repo_path (stringlengths 6..260) | max_issues_repo_name (stringlengths 6..119) | max_issues_repo_head_hexsha (stringlengths 40..41) | max_issues_repo_licenses (list) | max_issues_count (int64 1..67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24..24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24..24 ⌀) | max_forks_repo_path (stringlengths 6..260) | max_forks_repo_name (stringlengths 6..119) | max_forks_repo_head_hexsha (stringlengths 40..41) | max_forks_repo_licenses (list) | max_forks_count (int64 1..105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24..24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24..24 ⌀) | avg_line_length (float64 2..1.04M) | max_line_length (int64 2..11.2M) | alphanum_fraction (float64 0..1) | cells (list) | cell_types (list) | cell_type_groups (list) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a17093bb6348674e9531e1edeb4f72a308d833e | 10,310 | ipynb | Jupyter Notebook | test.ipynb | dario-github/notion_api | c979903cc16e2b8e6469ab453300e954192eb39d | ["MIT"] | 1 | 2021-08-17T11:28:02.000Z | 2021-08-17T11:28:02.000Z | test.ipynb | dario-github/notion_api | c979903cc16e2b8e6469ab453300e954192eb39d | ["MIT"] | null | null | null | test.ipynb | dario-github/notion_api | c979903cc16e2b8e6469ab453300e954192eb39d | ["MIT"] | null | null | null | 25.45679 | 302 | 0.505917 |
[
[
[
"import requests\nimport arrow\nimport pprint\nimport json\nfrom urllib.parse import urlencode\nfrom functools import reduce",
"_____no_output_____"
],
[
"token = open(\"./NOTION_TOKEN\", \"r\").readlines()[0]\nnotion_version = \"2021-08-16\"",
"_____no_output_____"
],
[
"extra_data = {\"filter\": {\"and\": [{\"property\": \"标签\",\n \"multi_select\": {\"is_not_empty\": True}},],},}",
"_____no_output_____"
],
[
"r_database = requests.post(\n url=\"https://api.notion.com/v1/databases/cecf4bb039dc46bca130a29a9db58906/query\",\n headers={\"Authorization\": \"Bearer \" + token,\n \"Notion-Version\": notion_version,\n \"Content-Type\": \"application/json\",\n },\n data=json.dumps(extra_data),\n)",
"_____no_output_____"
],
[
"respond = json.loads(r_database.text)",
"_____no_output_____"
],
[
"def take_page_plain_text(respond: dict):\n for result in respond[\"results\"]:\n page_id = result[\"url\"].split(\"/\")[-1].split(\"-\")[-1]\n r_page = requests.get(\n url=f\"https://api.notion.com/v1/blocks/{page_id}/children\",\n headers={\"Authorization\": f\"Bearer {token}\",\n \"Notion-Version\": notion_version,\n \"Content-Type\": \"application/json\",\n },\n )\n for block in json.loads(r_page.text).get(\"results\", []):\n for key in block:\n if not isinstance(block[key], dict):\n continue\n if \"text\" not in block[key]:\n continue\n for text in block[key][\"text\"]:\n yield text[\"plain_text\"]",
"_____no_output_____"
],
[
"text_list = list(take_page_plain_text(respond))",
"_____no_output_____"
],
[
"text_list[:3]",
"_____no_output_____"
],
[
"import pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport jieba",
"_____no_output_____"
],
[
"import sys\nfrom unicodedata import category\ncodepoints = range(sys.maxunicode + 1)\npunctuation = {c for k in codepoints if category(c := chr(k)).startswith(\"P\")}",
"_____no_output_____"
],
[
"from functional import seq\nsplit_text_list = [jieba.lcut(text, HMM=True) for text in text_list]",
"Building prefix dict from the default dictionary ...\nDumping model to file cache /tmp/jieba.cache\nLoading model cost 1.491 seconds.\nPrefix dict has been built successfully.\n"
],
[
"from glob import glob\n\nstopfiles = glob(\"./stopwords/*stopwords.txt\")\n\nstopwords = reduce(lambda x,y: x.union(y), [set([x.strip() for x in open(file, \"r\").readlines()]) for file in stopfiles])",
"_____no_output_____"
],
[
"def check_stopwords(word):\n return word in stopwords \\\n or word in punctuation \\\n or word.isdigit()",
"_____no_output_____"
],
[
"sequence = seq(split_text_list).map(lambda sent: [word for word in sent if not check_stopwords(word)])",
"_____no_output_____"
],
[
"uniqueWords = (sequence\n .map(lambda sent: set(sent))\n .reduce(lambda x, y: x.union(y))\n )",
"_____no_output_____"
],
[
"word2sents = {word.lower(): set() for word in uniqueWords}",
"_____no_output_____"
],
[
"for text in text_list:\n for word in uniqueWords:\n if word in text:\n word2sents[word.lower()].add(text)",
"_____no_output_____"
]
],
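The loop above checks every unique word against every text, which is O(|words| x |texts|) substring scans. A minimal sketch of building the same mapping in one pass over the already-tokenized sentences (results can differ slightly, since the original uses substring containment rather than token equality):

```python
from collections import defaultdict

# One pass over the tokenized sentences instead of scanning all unique
# words against all texts; keys are lowercased like word2sents above.
word2sents_fast = defaultdict(set)
for text, words in zip(text_list, split_text_list):
    for word in words:
        if not check_stopwords(word):
            word2sents_fast[word.lower()].add(text)
```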
[
[
"## 现有库",
"_____no_output_____"
]
],
[
[
"vectorizer = TfidfVectorizer()\nvectors = vectorizer.fit_transform(sequence.map(lambda x: \" \".join(x)).to_list())\nfeature_names = vectorizer.get_feature_names()\ndense = vectors.todense()\ndenselist = dense.tolist()\ndf = pd.DataFrame(denselist, columns=feature_names)",
"/home/zdc/.cache/pypoetry/virtualenvs/jupyter-env-st7wAHic-py3.8/lib/python3.8/site-packages/sklearn/utils/deprecation.py:87: FutureWarning: Function get_feature_names is deprecated; get_feature_names is deprecated in 1.0 and will be removed in 1.2. Please use get_feature_names_out instead.\n warnings.warn(msg, category=FutureWarning)\n"
],
[
"df.max(axis=0).sort_values(key=lambda x: -x).to_csv(\"./tf_idf_topic.csv\")",
"_____no_output_____"
],
[
"for word in df.max(axis=0).sort_values(key=lambda x: -x).head(3).index:\n print(word)\n print(word2sents[word])\n print(\"-\" * 10)",
"0x320646e7b37d5a31f5dcef9ccff9180eeb63b004\n{'0x320646e7b37d5a31f5dcef9ccff9180eeb63b004'}\n----------\n补充\n{'CV补充', '补充'}\n----------\n分散\n{'分散>集中', '注意力分散(大脑很难同时专注于2件事,其中一件事会倾向欲望和直觉)'}\n----------\n"
]
],
[
[
"## 自定义(不是tf*idf)",
"_____no_output_____"
]
],
[
[
"uniqueWords = (sequence\n .map(lambda sent: set(sent))\n .reduce(lambda x, y: x.union(y))\n )",
"_____no_output_____"
],
[
"def computeTF(wordDict, bagOfWords):\n tfDict = {}\n bagOfWordsCount = len(bagOfWords)\n for word, count in wordDict.items():\n tfDict[word] = count / float(bagOfWordsCount)\n return tfDict",
"_____no_output_____"
],
[
"def computeIDF(documents):\n import math\n N = len(documents)\n \n idfDict = dict.fromkeys(documents[0].keys(), 0)\n for document in documents:\n for word, val in document.items():\n if val > 0:\n idfDict[word] += 1\n \n for word, val in idfDict.items():\n idfDict[word] = math.log(N / float(val))\n return idfDict",
"_____no_output_____"
]
]
] |
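The notebook defines computeTF and computeIDF but stops before combining them. A minimal sketch of the usual multiplication step, assuming each document is represented as a word-count dict over the shared vocabulary (which is what computeIDF expects), and skipping sentences left empty by the stopword filter:

```python
def computeTFIDF(tf_dict, idfs):
    # tf * idf per word, matching the two helpers defined above
    return {word: tf * idfs[word] for word, tf in tf_dict.items()}

sents = [s for s in sequence.to_list() if s]  # drop empty sentences (computeTF divides by length)
docs = []
for sent in sents:
    counts = dict.fromkeys(uniqueWords, 0)
    for word in sent:
        counts[word] += 1
    docs.append(counts)

idfs = computeIDF(docs)
tfidf = [computeTFIDF(computeTF(doc, sent), idfs) for doc, sent in zip(docs, sents)]
```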
[
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a170a95a3ec8cccdc415fc705e3ba984c72157f | 1,653 | ipynb | Jupyter Notebook | exception handling.ipynb | hritik2504/letsupgradeB7python | ef9f16f7193a0cb92d0120f3013f3aa0e386d187 | ["Apache-2.0"] | null | null | null | exception handling.ipynb | hritik2504/letsupgradeB7python | ef9f16f7193a0cb92d0120f3013f3aa0e386d187 | ["Apache-2.0"] | null | null | null | exception handling.ipynb | hritik2504/letsupgradeB7python | ef9f16f7193a0cb92d0120f3013f3aa0e386d187 | ["Apache-2.0"] | null | null | null | 17.4 | 63 | 0.464005 |
[
[
[
"%%writefile test.txt\n\nThis is the test file ",
"Overwriting test.txt\n"
],
[
"f = open(\"test.txt\",'r')\nf.read()",
"_____no_output_____"
],
[
"try:\n f.write(\"my first file\\n\")\nexcept:\n print(\"unable to write! file is in read mode \")\nfinally:\n f.close()",
"unable to write! file is in read mode \n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code"
]
] |
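The notebook's finally block closes the file by hand. A minimal sketch of the same experiment with a specific exception type and a context manager, which closes the file even if the write fails:

```python
import io

try:
    with open("test.txt", "r") as f:
        f.write("my first file\n")  # fails: the file is opened in read mode
except io.UnsupportedOperation:
    print("unable to write! file is in read mode ")
```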
4a1714dbdf90c296c72f87d6f8451eb979b7c40b | 199,070 | ipynb | Jupyter Notebook | notebooks/027_exp.ipynb | skywalker0415/jigsaw | d0600db0884e1e79f01e1908c62dd10bb513a462 | ["RSA-MD"] | 4 | 2022-02-09T04:50:55.000Z | 2022-02-14T03:43:44.000Z | notebooks/027_exp.ipynb | skywalker0415/jigsaw | d0600db0884e1e79f01e1908c62dd10bb513a462 | ["RSA-MD"] | null | null | null | notebooks/027_exp.ipynb | skywalker0415/jigsaw | d0600db0884e1e79f01e1908c62dd10bb513a462 | ["RSA-MD"] | null | null | null | 65.917219 | 51,312 | 0.667685 |
[
[
[
"",
"_____no_output_____"
],
[
"<div class = 'alert alert-block alert-info'\n style = 'background-color:#4c1c84;\n color:#eeebf1;\n border-width:5px;\n border-color:#4c1c84;\n font-family:Comic Sans MS;\n border-radius: 50px 50px'>\n <p style = 'font-size:24px'>Exp 027</p>\n <a href = \"#Config\"\n style = \"color:#eeebf1;\n font-size:14px\">1.Config</a><br>\n <a href = \"#Settings\"\n style = \"color:#eeebf1;\n font-size:14px\">2.Settings</a><br>\n <a href = \"#Data-Load\"\n style = \"color:#eeebf1;\n font-size:14px\">3.Data Load</a><br>\n <a href = \"#Pytorch-Settings\"\n style = \"color:#eeebf1;\n font-size:14px\">4.Pytorch Settings</a><br>\n <a href = \"#Training\"\n style = \"color:#eeebf1;\n font-size:14px\">5.Training</a><br>\n</div>\n\n<p style = 'font-size:24px;\n color:#4c1c84'>\n 実施したこと\n</p>\n <li style = \"color:#4c1c84;\n font-size:14px\">使用データ:Jigsaw2nd</li>\n <li style = \"color:#4c1c84;\n font-size:14px\">使用モデル:DeBERTa-Base</li>\n <li style = \"color:#4c1c84;\n font-size:14px\">New!! Attentionの可視化</li>",
"_____no_output_____"
],
[
"<br>\n<h1 style = \"font-size:45px; font-family:Comic Sans MS ; font-weight : normal; background-color: #4c1c84 ; color : #eeebf1; text-align: center; border-radius: 100px 100px;\">\n Config\n</h1>\n<br>",
"_____no_output_____"
]
],
[
[
"import sys\nsys.path.append(\"../src/utils/iterative-stratification/\")\nsys.path.append(\"../src/utils/detoxify\")\nsys.path.append(\"../src/utils/coral-pytorch/\")",
"_____no_output_____"
],
[
"import warnings\nwarnings.simplefilter('ignore')\n\nimport os\nimport gc\ngc.enable()\nimport sys\nimport glob\nimport copy\nimport math\nimport time\nimport random\nimport string\nimport psutil\nimport pathlib\nfrom pathlib import Path\nfrom contextlib import contextmanager\nfrom collections import defaultdict\nfrom box import Box\nfrom typing import Optional\nfrom pprint import pprint\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport japanize_matplotlib\n\nfrom tqdm.auto import tqdm as tqdmp\nfrom tqdm.autonotebook import tqdm as tqdm\ntqdmp.pandas()\n\n## Model\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import StratifiedKFold, KFold\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nfrom transformers import AutoTokenizer, AutoModel, AdamW\nfrom transformers import RobertaModel, RobertaForSequenceClassification\nfrom transformers import RobertaTokenizer\nfrom transformers import LukeTokenizer, LukeModel, LukeConfig\nfrom transformers import get_linear_schedule_with_warmup, get_cosine_schedule_with_warmup\nfrom transformers import BertTokenizer, BertForSequenceClassification\nfrom transformers import RobertaTokenizer, RobertaForSequenceClassification\nfrom transformers import XLMRobertaTokenizer, XLMRobertaForSequenceClassification\nfrom transformers import DebertaTokenizer, DebertaModel\n\n# Pytorch Lightning\nimport pytorch_lightning as pl\nfrom pytorch_lightning.utilities.seed import seed_everything\nfrom pytorch_lightning import callbacks\nfrom pytorch_lightning.callbacks.progress import ProgressBarBase\nfrom pytorch_lightning import LightningDataModule, LightningDataModule\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping, LearningRateMonitor\nfrom pytorch_lightning.loggers import WandbLogger\nfrom pytorch_lightning.loggers.csv_logs import CSVLogger\nfrom pytorch_lightning.callbacks import RichProgressBar\n\nfrom sklearn.linear_model import Ridge\nfrom sklearn.svm import SVC, SVR\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom scipy.stats import rankdata\nfrom cuml.svm import SVR as cuml_SVR\nfrom cuml.linear_model import Ridge as cuml_Ridge\nimport cudf\nfrom detoxify import Detoxify\nfrom iterstrat.ml_stratifiers import MultilabelStratifiedKFold",
"2022-01-25 06:34:37.607316: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n"
],
[
"import torch\n\nconfig = {\n \"exp_comment\":\"Wiki AttackデータをLukeで学習\",\n \"seed\": 42,\n \"root\": \"/content/drive/MyDrive/kaggle/Jigsaw/raw\",\n \"n_fold\": 5,\n \"epoch\": 5,\n \"max_length\": 128,\n \"environment\": \"AWS\",\n \"project\": \"Jigsaw\",\n \"entity\": \"dataskywalker\",\n \"exp_name\": \"027_exp\",\n \"margin\": 0.5,\n \"train_fold\": [0, 1, 2, 3, 4],\n\n \"trainer\": {\n \"gpus\": 1,\n \"accumulate_grad_batches\": 8,\n \"progress_bar_refresh_rate\": 1,\n \"fast_dev_run\": True,\n \"num_sanity_val_steps\": 0,\n },\n\n \"train_loader\": {\n \"batch_size\": 4,\n \"shuffle\": True,\n \"num_workers\": 1,\n \"pin_memory\": True,\n \"drop_last\": True,\n },\n\n \"valid_loader\": {\n \"batch_size\": 4,\n \"shuffle\": False,\n \"num_workers\": 1,\n \"pin_memory\": True,\n \"drop_last\": False,\n },\n\n \"test_loader\": {\n \"batch_size\": 4,\n \"shuffle\": False,\n \"num_workers\": 1,\n \"pin_memory\": True,\n \"drop_last\": False,\n },\n\n \"backbone\": {\n \"name\": \"microsoft/deberta-base\",\n \"output_dim\": 1,\n },\n\n \"optimizer\": {\n \"name\": \"torch.optim.AdamW\",\n \"params\": {\n \"lr\": 1e-6,\n },\n },\n\n \"scheduler\": {\n \"name\": \"torch.optim.lr_scheduler.CosineAnnealingWarmRestarts\",\n \"params\": {\n \"T_0\": 20,\n \"eta_min\": 0,\n },\n },\n\n \"loss\": \"nn.BCEWithLogitsLoss\",\n}\n\nconfig = Box(config)\nconfig.tokenizer = DebertaTokenizer.from_pretrained(config.backbone.name)\nconfig.model = DebertaModel.from_pretrained(config.backbone.name)\n# pprint(config)",
"Some weights of the model checkpoint at microsoft/deberta-base were not used when initializing DebertaModel: ['lm_predictions.lm_head.LayerNorm.weight', 'lm_predictions.lm_head.bias', 'lm_predictions.lm_head.LayerNorm.bias', 'lm_predictions.lm_head.dense.bias', 'lm_predictions.lm_head.dense.weight']\n- This IS expected if you are initializing DebertaModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing DebertaModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
],
[
"config.tokenizer.save_pretrained(f\"../data/processed/{config.backbone.name}\")\n\npretrain_model = DebertaModel.from_pretrained(config.backbone.name)\npretrain_model.save_pretrained(f\"../data/processed/{config.backbone.name}\")",
"Some weights of the model checkpoint at microsoft/deberta-base were not used when initializing DebertaModel: ['lm_predictions.lm_head.LayerNorm.weight', 'lm_predictions.lm_head.bias', 'lm_predictions.lm_head.LayerNorm.bias', 'lm_predictions.lm_head.dense.bias', 'lm_predictions.lm_head.dense.weight']\n- This IS expected if you are initializing DebertaModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing DebertaModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
],
[
"# 個人的にAWSやKaggle環境やGoogle Colabを行ったり来たりしているのでまとめています\nimport os\nimport sys\nfrom pathlib import Path\n\nif config.environment == 'AWS':\n \n INPUT_DIR = Path('/mnt/work/data/kaggle/Jigsaw/')\n MODEL_DIR = Path(f'../models/{config.exp_name}/')\n OUTPUT_DIR = Path(f'../data/interim/{config.exp_name}/')\n UTIL_DIR = Path('/mnt/work/shimizu/kaggle/PetFinder/src/utils')\n \n os.makedirs(MODEL_DIR, exist_ok=True)\n os.makedirs(OUTPUT_DIR, exist_ok=True)\n \n print(f\"Your environment is 'AWS'.\\nINPUT_DIR is {INPUT_DIR}\\nMODEL_DIR is {MODEL_DIR}\\nOUTPUT_DIR is {OUTPUT_DIR}\\nUTIL_DIR is {UTIL_DIR}\")\n \n \nelif config.environment == 'Kaggle':\n INPUT_DIR = Path('../input/*****')\n MODEL_DIR = Path('./')\n OUTPUT_DIR = Path('./')\n print(f\"Your environment is 'Kaggle'.\\nINPUT_DIR is {INPUT_DIR}\\nMODEL_DIR is {MODEL_DIR}\\nOUTPUT_DIR is {OUTPUT_DIR}\")\n\n \nelif config.environment == 'Colab':\n INPUT_DIR = Path('/content/drive/MyDrive/kaggle/Jigsaw/raw')\n BASE_DIR = Path(\"/content/drive/MyDrive/kaggle/Jigsaw/interim\")\n\n MODEL_DIR = BASE_DIR / f'{config.exp_name}'\n OUTPUT_DIR = BASE_DIR / f'{config.exp_name}/'\n\n os.makedirs(MODEL_DIR, exist_ok=True)\n os.makedirs(OUTPUT_DIR, exist_ok=True)\n \n if not os.path.exists(INPUT_DIR):\n print('Please Mount your Google Drive.')\n else:\n print(f\"Your environment is 'Colab'.\\nINPUT_DIR is {INPUT_DIR}\\nMODEL_DIR is {MODEL_DIR}\\nOUTPUT_DIR is {OUTPUT_DIR}\")\n \nelse:\n print(\"Please choose 'AWS' or 'Kaggle' or 'Colab'.\\nINPUT_DIR is not found.\")",
"Your environment is 'AWS'.\nINPUT_DIR is /mnt/work/data/kaggle/Jigsaw\nMODEL_DIR is ../models/027_exp\nOUTPUT_DIR is ../data/interim/027_exp\nUTIL_DIR is /mnt/work/shimizu/kaggle/PetFinder/src/utils\n"
],
[
"# Seed固定\nseed_everything(config.seed)",
"_____no_output_____"
],
[
"## 処理時間計測\n@contextmanager\ndef timer(name:str, slack:bool=False):\n t0 = time.time()\n p = psutil.Process(os.getpid())\n m0 = p.memory_info()[0] / 2. ** 30\n print(f'<< {name} >> Start')\n yield\n \n m1 = p.memory_info()[0] / 2. ** 30\n delta = m1 - m0\n sign = '+' if delta >= 0 else '-'\n delta = math.fabs(delta)\n \n print(f\"<< {name} >> {m1:.1f}GB({sign}{delta:.1f}GB):{time.time() - t0:.1f}sec\", file=sys.stderr)",
"_____no_output_____"
]
],
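A small usage sketch of the timer context manager defined above (any block of work can be wrapped):

```python
# Reports memory delta and elapsed seconds around the wrapped block.
with timer("dummy work"):
    total = sum(i * i for i in range(1_000_000))
```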
[
[
"<br>\n<h1 style = \"font-size:45px; font-family:Comic Sans MS ; font-weight : normal; background-color: #4c1c84 ; color : #eeebf1; text-align: center; border-radius: 100px 100px;\">\n Data Load\n</h1>\n<br>",
"_____no_output_____"
]
],
[
[
"## Data Check\nfor dirnames, _, filenames in os.walk(INPUT_DIR):\n \n for filename in filenames:\n\n print(f'{dirnames}/{filename}')",
"/mnt/work/data/kaggle/Jigsaw/comments_to_score.csv\n/mnt/work/data/kaggle/Jigsaw/sample_submission.csv\n/mnt/work/data/kaggle/Jigsaw/validation_data.csv\n"
],
[
"val_df = pd.read_csv(\"/mnt/work/data/kaggle/Jigsaw/validation_data.csv\")\ntest_df = pd.read_csv(\"/mnt/work/data/kaggle/Jigsaw/comments_to_score.csv\")\n\ndisplay(val_df.head())\ndisplay(test_df.head())",
"_____no_output_____"
]
],
[
[
"<br>\n<h2 style = \"font-size:45px;\n font-family:Comic Sans MS ;\n font-weight : normal; \n background-color: #eeebf1 ;\n color : #4c1c84;\n text-align: center;\n border-radius: 100px 100px;\">\n Jigsaw Classification\n</h2>\n<br>",
"_____no_output_____"
]
],
[
[
"train_df = pd.read_csv(\"../data/external/jigsaw-unbiased/train.csv\")\ntrain_df = train_df.rename(columns={\"target\": \"toxicity\"})\ntrain_df.iloc[:, :20].head()",
"_____no_output_____"
],
[
"target_cols = [\n \"toxicity\",\n \"severe_toxicity\",\n \"identity_attack\",\n \"insult\",\n \"threat\",\n \"sexual_explicit\"\n]\n\nplt.figure(figsize=(12, 5))\nsns.histplot(train_df[\"toxicity\"], color=\"#4c1c84\")\nplt.grid()\nplt.show()",
"_____no_output_____"
],
[
"def sample_df(df:pd.DataFrame, frac=0.2):\n \n '''\n train_dfからtoxicとnon_toxicを抽出\n non_toxicの割合をfracで調整\n '''\n \n print(f\"Before: {df.shape}\")\n label_cols = [\n \"toxicity\",\n \"severe_toxicity\",\n \"identity_attack\",\n \"insult\",\n \"threat\",\n \"sexual_explicit\"\n ]\n df[\"y\"] = df[label_cols].sum(axis=1)\n df[\"y\"] = df[\"y\"]/df[\"y\"].max()\n\n toxic_df = df[df[\"y\"]>0].reset_index(drop=True)\n nontoxic_df = df[df[\"y\"]==0].reset_index(drop=True)\n nontoxic_df = nontoxic_df.sample(frac=frac, random_state=config.seed)\n \n df = pd.concat([toxic_df, nontoxic_df], axis=0).sample(frac=1).reset_index(drop=True)\n \n print(f\"After: {df.shape}\")\n return df",
"_____no_output_____"
],
[
"with timer(\"sampling df\"):\n \n train_df = sample_df(train_df, frac=0.2)\n display(train_df.head())",
"<< sampling df >> Start\nBefore: (1804874, 45)\nAfter: (809648, 46)\n"
]
],
[
[
"<br>\n<h1 style = \"font-size:45px; font-family:Comic Sans MS ; font-weight : normal; background-color: #4c1c84 ; color : #eeebf1; text-align: center; border-radius: 100px 100px;\">\n Pytorch Dataset\n</h1>\n<br>",
"_____no_output_____"
]
],
[
[
"class JigsawDataset:\n \n def __init__(self, df, tokenizer, max_length, mode, target_cols):\n \n self.df = df\n self.max_len = max_length\n self.tokenizer = tokenizer\n self.mode = mode\n self.target_cols = target_cols\n \n if self.mode == \"train\":\n self.text = df[\"comment_text\"].values\n self.target = df[target_cols].values\n \n elif self.mode == \"valid\":\n self.more_toxic = df[\"more_toxic\"].values\n self.less_toxic = df[\"less_toxic\"].values\n \n else:\n self.text == df[\"text\"].values\n \n def __len__(self):\n return len(self.df)\n \n def __getitem__(self, index):\n \n if self.mode == \"train\":\n \n text = self.text[index]\n target = self.target[index]\n \n inputs_text = self.tokenizer.encode_plus(\n text,\n truncation=True,\n return_attention_mask=True,\n return_token_type_ids=True,\n max_length = self.max_len,\n padding=\"max_length\",\n )\n \n text_ids = inputs_text[\"input_ids\"]\n text_mask = inputs_text[\"attention_mask\"]\n text_token_type_ids = inputs_text[\"token_type_ids\"]\n\n return {\n 'text_ids': torch.tensor(text_ids, dtype=torch.long),\n 'text_mask': torch.tensor(text_mask, dtype=torch.long),\n 'text_token_type_ids': torch.tensor(text_token_type_ids, dtype=torch.long),\n 'target': torch.tensor(target, dtype=torch.float)\n }\n \n elif self.mode == \"valid\":\n \n more_toxic = self.more_toxic[index]\n less_toxic = self.less_toxic[index]\n\n inputs_more_toxic = self.tokenizer.encode_plus(\n more_toxic,\n truncation=True,\n return_attention_mask=True,\n return_token_type_ids=True,\n max_length = self.max_len,\n padding=\"max_length\",\n )\n\n inputs_less_toxic = self.tokenizer.encode_plus(\n less_toxic,\n truncation=True,\n return_attention_mask=True,\n return_token_type_ids=True,\n max_length = self.max_len,\n padding=\"max_length\",\n )\n \n target = 1\n\n more_toxic_ids = inputs_more_toxic[\"input_ids\"]\n more_toxic_mask = inputs_more_toxic[\"attention_mask\"]\n more_token_type_ids = inputs_more_toxic[\"token_type_ids\"]\n\n less_toxic_ids = inputs_less_toxic[\"input_ids\"]\n less_toxic_mask = inputs_less_toxic[\"attention_mask\"]\n less_token_type_ids = inputs_less_toxic[\"token_type_ids\"]\n \n return {\n 'more_toxic_ids': torch.tensor(more_toxic_ids, dtype=torch.long),\n 'more_toxic_mask': torch.tensor(more_toxic_mask, dtype=torch.long),\n 'more_token_type_ids': torch.tensor(more_token_type_ids, dtype=torch.long),\n \n 'less_toxic_ids': torch.tensor(less_toxic_ids, dtype=torch.long),\n 'less_toxic_mask': torch.tensor(less_toxic_mask, dtype=torch.long),\n 'less_token_type_ids': torch.tensor(less_token_type_ids, dtype=torch.long),\n \n 'target': torch.tensor(target, dtype=torch.float)\n }\n \n else:\n \n text = self.text[index]\n \n input_text = self.tokenizer.encode_plus(\n text,\n truncation=True,\n return_attention_mask=True,\n return_token_type_ids=True,\n max_length = self.max_len,\n padding=\"max_length\",\n )\n \n text_ids = inputs_text[\"input_ids\"]\n text_mask = inputs_text[\"attention_mask\"]\n text_token_type_ids = inputs_text[\"token_type_ids\"]\n\n return {\n 'text_ids': torch.tensor(text_ids, dtype=torch.long),\n 'text_mask': torch.tensor(text_mask, dtype=torch.long),\n 'text_token_type_ids': torch.tensor(text_token_type_ids, dtype=torch.long),\n }",
"_____no_output_____"
]
],
[
[
"<br>\n<h2 style = \"font-size:45px;\n font-family:Comic Sans MS ;\n font-weight : normal;\n background-color: #eeebf1 ;\n color : #4c1c84;\n text-align: center; \n border-radius: 100px 100px;\">\n DataModule\n</h2>\n<br>",
"_____no_output_____"
]
],
[
[
"class JigsawDataModule(LightningDataModule):\n\n def __init__(self, train_df, valid_df, test_df, cfg):\n\n super().__init__()\n self._train_df = train_df\n self._valid_df = valid_df\n self._test_df = test_df\n self._cfg = cfg\n\n def train_dataloader(self):\n dataset = JigsawDataset(\n df=self._train_df, \n tokenizer=self._cfg.tokenizer,\n max_length=self._cfg.max_length,\n mode=\"train\",\n target_cols=target_cols\n )\n return DataLoader(dataset, **self._cfg.train_loader)\n\n def val_dataloader(self):\n dataset = JigsawDataset(\n df=self._valid_df, \n tokenizer=self._cfg.tokenizer,\n max_length=self._cfg.max_length,\n mode=\"valid\",\n target_cols=target_cols\n )\n return DataLoader(dataset, **self._cfg.valid_loader)\n\n def test_dataloader(self):\n dataset = JigsawDataset(\n df=self._test_df,\n tokenizer = self._cfg.tokenizer,\n max_length=self._cfg.max_length,\n mode=\"test\",\n target_cols=target_cols\n )\n\n return DataLoader(dataset, **self._cfg.test_loader)",
"_____no_output_____"
],
[
"## DataCheck\nseed_everything(config.seed)\n\nsample_dataloader = JigsawDataModule(train_df, val_df, test_df, config).train_dataloader()\nfor data in sample_dataloader:\n break",
"_____no_output_____"
],
[
"print(data[\"text_ids\"].size())\nprint(data[\"text_mask\"].size())\nprint(data[\"text_token_type_ids\"].size())\nprint(data[\"target\"].size())\nprint(data[\"target\"])\noutput = config.model(\n data[\"text_ids\"],\n data[\"text_mask\"],\n data[\"text_token_type_ids\"],\n output_attentions=True\n)\nprint(output[\"last_hidden_state\"].size(), output[\"attentions\"][-1].size())\nprint(output[\"last_hidden_state\"][:, 0, :].size(), output[\"attentions\"][-1].size())",
"torch.Size([4, 128])\ntorch.Size([4, 128])\ntorch.Size([4, 128])\ntorch.Size([4, 6])\ntensor([[0.1667, 0.0000, 0.0000, 0.1667, 0.0000, 0.0000],\n [0.1667, 0.1667, 0.0000, 0.0000, 0.1667, 0.0000],\n [0.6000, 0.0000, 0.0000, 0.5000, 0.0000, 0.0000],\n [0.2000, 0.0000, 0.0000, 0.0000, 0.2000, 0.0000]])\ntorch.Size([4, 128, 768]) torch.Size([4, 12, 128, 128])\ntorch.Size([4, 768]) torch.Size([4, 12, 128, 128])\n"
]
],
[
[
"<br>\n<h2 style = \"font-size:45px;\n font-family:Comic Sans MS ;\n font-weight : normal;\n background-color: #eeebf1 ;\n color : #4c1c84;\n text-align: center; \n border-radius: 100px 100px;\">\n LigitningModule\n</h2>\n<br>",
"_____no_output_____"
]
],
[
[
"class JigsawModel(pl.LightningModule):\n \n def __init__(self, cfg, fold_num):\n \n super().__init__()\n self.cfg = cfg\n self.__build_model()\n self.criterion = eval(self.cfg.loss)()\n self.save_hyperparameters(cfg)\n self.fold_num = fold_num\n \n def __build_model(self):\n \n self.base_model = DebertaModel.from_pretrained(\n self.cfg.backbone.name\n )\n print(f\"Use Model: {self.cfg.backbone.name}\")\n self.norm = nn.LayerNorm(768)\n self.drop = nn.Dropout(p=0.3)\n self.head = nn.Linear(768, self.cfg.backbone.output_dim)\n \n def forward(self, ids, mask, token_type_ids):\n \n output = self.base_model(\n input_ids=ids, \n attention_mask=mask,\n token_type_ids=token_type_ids,\n output_attentions=True\n )\n feature = self.norm(output[\"last_hidden_state\"][:, 0, :])\n out = self.drop(feature)\n out = self.head(out)\n \n return {\n \"logits\":out, \n \"attention\":output[\"attentions\"], \n \"mask\":mask,\n }\n \n def training_step(self, batch, batch_idx):\n \n text_ids = batch[\"text_ids\"]\n text_mask = batch['text_mask']\n text_token_type_ids = batch['text_token_type_ids']\n targets = batch['target']\n \n outputs = self.forward(text_ids, text_mask, text_token_type_ids)\n loss = self.criterion(outputs[\"logits\"], targets)\n \n return {\n \"loss\":loss,\n \"targets\":targets,\n }\n \n def training_epoch_end(self, training_step_outputs):\n\n loss_list = []\n\n for out in training_step_outputs:\n\n loss_list.extend([out[\"loss\"].cpu().detach().tolist()])\n\n meanloss = sum(loss_list)/len(loss_list)\n\n logs = {f\"train_loss/fold{self.fold_num+1}\": meanloss,}\n\n self.log_dict(\n logs,\n on_step=False,\n on_epoch=True,\n prog_bar=True,\n logger=True\n )\n \n def validation_step(self, batch, batch_idx):\n\n more_toxic_ids = batch['more_toxic_ids']\n more_toxic_mask = batch['more_toxic_mask']\n more_text_token_type_ids = batch['more_token_type_ids']\n \n less_toxic_ids = batch['less_toxic_ids']\n less_toxic_mask = batch['less_toxic_mask']\n less_text_token_type_ids = batch['less_token_type_ids']\n \n targets = batch['target']\n\n more_outputs = self.forward(\n more_toxic_ids, \n more_toxic_mask,\n more_text_token_type_ids\n )\n \n less_outputs = self.forward(\n less_toxic_ids, \n less_toxic_mask,\n less_text_token_type_ids\n )\n \n \n more_outputs = torch.sum(more_outputs[\"logits\"], 1)\n less_outputs = torch.sum(less_outputs[\"logits\"], 1)\n \n outputs = more_outputs - less_outputs\n logits = outputs.clone()\n\n logits[logits > 0] = 1\n loss = self.criterion(logits, targets)\n\n return {\n \"loss\":loss,\n \"pred\":outputs,\n \"targets\":targets,\n }\n \n \n def validation_epoch_end(self, validation_step_outputs):\n\n loss_list = []\n pred_list = []\n target_list = []\n\n for out in validation_step_outputs:\n loss_list.extend([out[\"loss\"].cpu().detach().tolist()])\n pred_list.append(out[\"pred\"].detach().cpu().numpy())\n target_list.append(out[\"targets\"].detach().cpu().numpy())\n\n meanloss = sum(loss_list)/len(loss_list)\n pred_list = np.concatenate(pred_list)\n pred_count = sum(x>0 for x in pred_list)/len(pred_list)\n\n logs = {\n f\"valid_loss/fold{self.fold_num+1}\":meanloss,\n f\"valid_acc/fold{self.fold_num+1}\":pred_count,\n }\n\n self.log_dict(\n logs,\n on_step=False,\n on_epoch=True,\n prog_bar=True,\n logger=True\n )\n \n def configure_optimizers(self):\n\n optimizer = eval(self.cfg.optimizer.name)(\n self.parameters(), **self.cfg.optimizer.params\n )\n\n self.scheduler = eval(self.cfg.scheduler.name)(\n optimizer, **self.cfg.scheduler.params\n )\n \n scheduler = 
{\"scheduler\": self.scheduler, \"interval\": \"step\",}\n\n return [optimizer], [scheduler]",
"_____no_output_____"
]
],
[
[
"<br>\n<h2 style = \"font-size:45px;\n font-family:Comic Sans MS ;\n font-weight : normal;\n background-color: #eeebf1 ;\n color : #4c1c84;\n text-align: center; \n border-radius: 100px 100px;\">\n Training\n</h2>\n<br>",
"_____no_output_____"
]
],
[
[
"sns.distplot(train_df[\"y\"])",
"_____no_output_____"
],
[
"skf = KFold(\n n_splits=config.n_fold, \n shuffle=True, \n random_state=config.seed\n)\n\n\nfor fold, (_, val_idx) in enumerate(skf.split(X=train_df, y=train_df[\"y\"])):\n train_df.loc[val_idx, \"kfold\"] = int(fold)\n\ntrain_df[\"kfold\"] = train_df[\"kfold\"].astype(int)\ntrain_df.head()",
"_____no_output_____"
],
[
"## Debug\nconfig.trainer.fast_dev_run = True\nconfig.backbone.output_dim = len(target_cols)\n\nfor fold in config.train_fold:\n \n print(\"★\"*25, f\" Fold{fold+1} \", \"★\"*25)\n\n df_train = train_df[train_df.kfold != fold].reset_index(drop=True)\n\n datamodule = JigsawDataModule(df_train, val_df, test_df, config)\n sample_dataloader = JigsawDataModule(df_train, val_df, test_df, config).train_dataloader()\n\n config.scheduler.params.T_0 = config.epoch * len(sample_dataloader)\n model = JigsawModel(config, fold)\n lr_monitor = callbacks.LearningRateMonitor()\n\n loss_checkpoint = callbacks.ModelCheckpoint(\n filename=f\"best_acc_fold{fold+1}\",\n monitor=f\"valid_acc/fold{fold+1}\",\n save_top_k=1,\n mode=\"max\",\n save_last=False,\n dirpath=MODEL_DIR,\n )\n\n wandb_logger = WandbLogger(\n project=config.project, \n entity=config.entity,\n name = f\"{config.exp_name}\",\n tags = ['DeBERTa-Base', \"Jigsaw-Unbiased\"]\n )\n\n lr_monitor = LearningRateMonitor(logging_interval='step')\n\n trainer = pl.Trainer(\n max_epochs=config.epoch,\n callbacks=[loss_checkpoint, lr_monitor, RichProgressBar()],\n# deterministic=True,\n logger=[wandb_logger],\n **config.trainer\n )\n trainer.fit(model, datamodule=datamodule)",
"★★★★★★★★★★★★★★★★★★★★★★★★★ Fold1 ★★★★★★★★★★★★★★★★★★★★★★★★★\n"
],
[
"## Training\nconfig.trainer.fast_dev_run = False\nconfig.backbone.output_dim = len(target_cols)\n\nfor fold in config.train_fold:\n \n print(\"★\"*25, f\" Fold{fold+1} \", \"★\"*25)\n\n df_train = train_df[train_df.kfold != fold].reset_index(drop=True)\n\n datamodule = JigsawDataModule(df_train, val_df, test_df, config)\n sample_dataloader = JigsawDataModule(df_train, val_df, test_df, config).train_dataloader()\n\n config.scheduler.params.T_0 = config.epoch * len(sample_dataloader)\n model = JigsawModel(config, fold)\n lr_monitor = callbacks.LearningRateMonitor()\n\n loss_checkpoint = callbacks.ModelCheckpoint(\n filename=f\"best_acc_fold{fold+1}\",\n monitor=f\"valid_acc/fold{fold+1}\",\n save_top_k=1,\n mode=\"max\",\n save_last=False,\n dirpath=MODEL_DIR,\n )\n\n wandb_logger = WandbLogger(\n project=config.project, \n entity=config.entity,\n name = f\"{config.exp_name}\",\n tags = ['DeBERTa-Base', \"Jigsaw-Unbiased\"]\n )\n\n lr_monitor = LearningRateMonitor(logging_interval='step')\n\n trainer = pl.Trainer(\n max_epochs=config.epoch,\n callbacks=[loss_checkpoint, lr_monitor, RichProgressBar()],\n# deterministic=True,\n logger=[wandb_logger],\n **config.trainer\n )\n trainer.fit(model, datamodule=datamodule)",
"★★★★★★★★★★★★★★★★★★★★★★★★★ Fold1 ★★★★★★★★★★★★★★★★★★★★★★★★★\n"
],
[
"device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nprint(f\"Device == {device}\")\n\nMORE = np.zeros(len(val_df))\nLESS = np.zeros(len(val_df))\nPRED = np.zeros(len(test_df))\n\nattention_array = np.zeros((len(df), 256)) # attention格納\nmask_array = np.zeros((len(df), 256)) # mask情報格納,後でattentionと掛け合わせる\n\nfor fold in config.train_fold:\n\n pred_list = []\n print(\"★\"*25, f\" Fold{fold+1} \", \"★\"*25)\n\n valid_dataloader = JigsawDataModule(train_df, val_df, test_df, config).val_dataloader()\n model = JigsawModel(config, fold)\n\n loss_checkpoint = callbacks.ModelCheckpoint(\n filename=f\"best_acc_fold{fold+1}\",\n monitor=f\"valid_acc/fold{fold+1}\",\n save_top_k=1,\n mode=\"max\",\n save_last=False,\n dirpath=\"../input/toxicroberta/\",\n )\n model = model.load_from_checkpoint(MODEL_DIR/f\"best_acc_fold{fold+1}.ckpt\", cfg=config, fold_num=fold)\n model.to(device)\n model.eval()\n \n more_list = []\n less_list = []\n \n for step, data in tqdm(enumerate(valid_dataloader), total=len(valid_dataloader)):\n\n more_toxic_ids = data['more_toxic_ids'].to(device)\n more_toxic_mask = data['more_toxic_mask'].to(device)\n more_text_token_type_ids = data['more_token_type_ids'].to(device)\n \n less_toxic_ids = data['less_toxic_ids'].to(device)\n less_toxic_mask = data['less_toxic_mask'].to(device)\n less_text_token_type_ids = data['less_token_type_ids'].to(device)\n \n more_outputs = model(\n more_toxic_ids, \n more_toxic_mask,\n more_text_token_type_ids,\n )\n \n less_outputs = model(\n less_toxic_ids, \n less_toxic_mask,\n less_text_token_type_ids\n )\n \n more_list.append(more_outputs[:, 0].detach().cpu().numpy())\n less_list.append(less_outputs[:, 0].detach().cpu().numpy())\n\n MORE += np.concatenate(more_list)/len(config.train_fold)\n LESS += np.concatenate(less_list)/len(config.train_fold)\n# PRED += pred_list/len(config.train_fold)",
"Device == cuda\n★★★★★★★★★★★★★★★★★★★★★★★★★ Fold1 ★★★★★★★★★★★★★★★★★★★★★★★★★\n"
],
[
"plt.figure(figsize=(12, 5))\nplt.scatter(LESS, MORE)\nplt.xlabel(\"less-toxic\")\nplt.ylabel(\"more-toxic\")\nplt.grid()\nplt.show()",
"_____no_output_____"
],
[
"val_df[\"less_attack\"] = LESS\nval_df[\"more_attack\"] = MORE\nval_df[\"diff_attack\"] = val_df[\"more_attack\"] - val_df[\"less_attack\"]\nattack_score = val_df[val_df[\"diff_attack\"]>0][\"diff_attack\"].count()/len(val_df)\nprint(f\"Wiki Attack Score: {attack_score:.6f}\")",
"Wiki Attack Score: 0.687126\n"
]
]
] |
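The notebook's stated "Attention visualization" goal stops at allocating attention_array. A minimal sketch of turning the attentions returned by JigsawModel.forward into per-token scores for one validation example; head-averaged last-layer attention from the [CLS] position is one rough but common choice, and variable names (model, valid_dataloader, device, config) follow the cells above:

```python
batch = next(iter(valid_dataloader))
with torch.no_grad():
    out = model(
        batch["less_toxic_ids"].to(device),
        batch["less_toxic_mask"].to(device),
        batch["less_token_type_ids"].to(device),
    )

last_layer = out["attention"][-1]          # (batch, heads, seq, seq)
cls_scores = last_layer[0].mean(dim=0)[0]  # average heads, take the [CLS] row
tokens = config.tokenizer.convert_ids_to_tokens(batch["less_toxic_ids"][0].tolist())

# Top tokens by attention weight, skipping padding via the mask
mask = batch["less_toxic_mask"][0].tolist()
pairs = [(t, s) for t, s, m in zip(tokens, cls_scores.tolist(), mask) if m]
for tok, score in sorted(pairs, key=lambda x: -x[1])[:10]:
    print(f"{tok}\t{score:.4f}")
```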
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
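config.margin is defined but never used in the cells above: training uses BCEWithLogitsLoss on the six toxicity targets, and validation only counts the sign of the score difference. A hedged sketch of the pairwise loss that margin setting suggests, as a hypothetical drop-in inside validation_step where more_outputs and less_outputs are the summed logits:

```python
# Hypothetical: rank more_toxic above less_toxic with the configured margin.
rank_criterion = nn.MarginRankingLoss(margin=self.cfg.margin)
rank_target = torch.ones_like(more_outputs)   # +1 means "first input ranks higher"
rank_loss = rank_criterion(more_outputs, less_outputs, rank_target)
```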
4a1717a0427a72d5e3e506e7544c9d35518d78fd | 5,008 | ipynb | Jupyter Notebook | src/utils/Untitled.ipynb | PawelRosikiewicz/Swissroads | 97d65ef8977b111de8feb33d7a3596d6ff5bf6be | ["MIT"] | null | null | null | src/utils/Untitled.ipynb | PawelRosikiewicz/Swissroads | 97d65ef8977b111de8feb33d7a3596d6ff5bf6be | ["MIT"] | null | null | null | src/utils/Untitled.ipynb | PawelRosikiewicz/Swissroads | 97d65ef8977b111de8feb33d7a3596d6ff5bf6be | ["MIT"] | null | null | null | 34.30137 | 115 | 0.468251 |
[
[
[
"def create_simple_convnet_model(*, input_size, output_size, verbose=False, **kwargs):\n \n # Convolutional Network, ........................\n model = keras.Sequential()\n\n #.. 1st cnn, layer\n model.add(keras.layers.Conv2D(\n filters=kwargs['Conv2D_1__filters'], \n kernel_size=kwargs['Conv2D_1__kernel_size'], \n strides=kwargs['Conv2D_1__stride'],\n activation=kwargs['Conv2D_1__activation'], \n input_shape=input_size\n ))\n\n #.. maxpool 1.\n model.add(keras.layers.MaxPool2D(pool_size=kwargs['MaxPool2D_1__pool_size']))\n\n #.. 2nd cnn layer, \n model.add(keras.layers.Conv2D(\n filters=kwargs['Conv2D_2__filters'], \n kernel_size=kwargs['Conv2D_2__kernel_size'], \n strides=kwargs['Conv2D_2__stride'],\n activation=kwargs['Conv2D_2__activation'], \n ))\n\n #.. maxpool 2, \n model.add(keras.layers.MaxPool2D(pool_size=kwargs['MaxPool2D_2__pool_size']))\n\n \n # flatten the results, .........................\n model.add(keras.layers.Flatten())\n \n \n # dense nn, ....................................\n \n #.. First hidden layer\n model.add(Dense(\n units=kwargs['h1_unit_size'], \n activation=kwargs[\"h1_activation\"], \n kernel_initializer=initializers.VarianceScaling(scale=2.0, seed=0)\n ))\n model.add(tf.keras.layers.Dropout(kwargs[\"h1_Dropout\"]))\n \n #.. Output layer\n model.add(Dense( \n units=output_size, \n activation=kwargs[\"out_activation\"],\n kernel_regularizer=tf.keras.regularizers.l2(0.001),\n kernel_initializer=initializers.VarianceScaling(scale=1.0, seed=0)\n ))\n\n \n # Print network summary\n if verbose==True:\n print(model.summary())\n else:\n pass\n\n \n # Define Loss Function and Trianing Operation \n model.compile(\n optimizer= kwargs[\"optimizer\"],\n loss= losses.sparse_categorical_crossentropy,\n metrics= kwargs[\"metrics\"] # even one arg must be in the list\n )\n \n return model \n \n \n \n ",
"_____no_output_____"
],
[
"grid = ParameterGrid({\n \"random_state\":[0], # used to divide train,valid datasets, \n \"train_test_split__train_size\":[0.7],\n # -- generators, ........................\n \"generator__batch_size\": [20],\n \"generator__target_size\" :[(299, 299)], # tuple, for each image x,y dimension in pixels, \n \"generator__validation_split\" : [0.3], # caution its opposite then in train_test_split__train_size\"\n # -- conv model, ........................\n \"model\":[\"two_layers\"],\n 'Conv2D_1__filters': [64], \n 'Conv2D_1__kernel_size': [5], \n 'Conv2D_1__stride': [2], \n 'Conv2D_1__activation' : ['relu'], \n 'MaxPool2D_1__pool_size': [2], \n 'Conv2D_2__filters': [64], \n 'Conv2D_2__kernel_size': [3], \n 'Conv2D_2__stride': [1], \n 'Conv2D_2__activation' : [\"relu\"], \n 'MaxPool2D_2__pool_size': [2], \n # -- dense nn, ...........................\n \"h1_unit_size\":[24],\n \"h1_Dropout\" : [0],\n \"h1_activation\": [\"relu\"],\n \"out_activation\":[\"softmax\"],\n \"optimizer\":[\"Adam\"],\n \"metrics\": [[\"acc\"]],\n # -- training, ...........................\n \"EarlyStopping__patience\": [6],\n \"fit_generator__epoch\": [2] \n \n})\n\nparams",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code"
]
] |
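The cell builds the grid but never iterates it. A minimal sketch of how the grid would drive model construction; the import was missing in the cell above, and the RGB input shape and class count of 6 are assumptions:

```python
from sklearn.model_selection import ParameterGrid  # grid above is a ParameterGrid

for params in grid:  # each entry is a plain dict of keyword arguments
    model = create_simple_convnet_model(
        input_size=(*params["generator__target_size"], 3),  # assumed RGB input
        output_size=6,    # assumed number of classes
        verbose=True,
        **params,         # keys not read by the function are simply ignored
    )
```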
4a171ce84909c90e7d23cfb96beec84888634b80 | 2,136 | ipynb | Jupyter Notebook | Chapter 12/pipelines/05 - Working with model packages.ipynb | amikewatson/Learn-Amazon-SageMaker-second-edition | 64955fd96a5917d8d4d5e18a6dfc57a5432250be | ["MIT"] | 15 | 2021-10-01T02:36:24.000Z | 2022-03-02T23:37:04.000Z | Chapter 12/pipelines/05 - Working with model packages.ipynb | amikewatson/Learn-Amazon-SageMaker-second-edition | 64955fd96a5917d8d4d5e18a6dfc57a5432250be | ["MIT"] | null | null | null | Chapter 12/pipelines/05 - Working with model packages.ipynb | amikewatson/Learn-Amazon-SageMaker-second-edition | 64955fd96a5917d8d4d5e18a6dfc57a5432250be | ["MIT"] | 14 | 2021-10-30T14:21:43.000Z | 2022-03-11T02:14:28.000Z | 21.148515 | 104 | 0.582397 |
[
[
[
"## Find information about a model package",
"_____no_output_____"
]
],
[
[
"import boto3\nimport pprint",
"_____no_output_____"
],
[
"sm = boto3.client('sagemaker')",
"_____no_output_____"
],
[
"model_package_group_name = 'blazing-text-on-amazon-customer-reviews-package'",
"_____no_output_____"
],
[
"response = sm.describe_model_package_group(ModelPackageGroupName=model_package_group_name)\npprint.pprint(response)",
"_____no_output_____"
],
[
"response = sm.list_model_packages(ModelPackageGroupName=model_package_group_name)\npprint.pprint(response)",
"_____no_output_____"
],
[
"model_package_name = response['ModelPackageSummaryList'][0]['ModelPackageArn']",
"_____no_output_____"
],
[
"response = sm.describe_model_package(ModelPackageName=model_package_name)\npprint.pprint(response)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
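After describing a package version, a common next step is updating its approval status so a deployment pipeline can pick it up; a minimal sketch using the same sm client and the model_package_name ARN from the cells above:

```python
# Sketch: approve the latest model package version inspected above.
response = sm.update_model_package(
    ModelPackageArn=model_package_name,
    ModelApprovalStatus='Approved',
)
pprint.pprint(response)
```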
4a171e6de087a620e91df6540bc2f81a3298db45 | 7,154 | ipynb | Jupyter Notebook | MaterialCursoPython/Fase 2 - Manejo de datos y optimizacion/Tema 07 - Gestion de errores/Apuntes/Leccion 2 (Apuntes) - Excepciones.ipynb | mangrovex/CursoPython | 85b3d8a920f79a1f184b8508cf011fda238eada0 | ["MIT"] | 105 | 2016-07-08T19:43:03.000Z | 2018-10-20T14:00:14.000Z | Fase 2 - Manejo de datos y optimizacion/Tema 07 - Gestion de errores/Apuntes/Leccion 2 (Apuntes) - Excepciones.ipynb | ruben69695/python-course | a3d3532279510fa0315a7636c373016c7abe4f0a | ["MIT"] | null | null | null | Fase 2 - Manejo de datos y optimizacion/Tema 07 - Gestion de errores/Apuntes/Leccion 2 (Apuntes) - Excepciones.ipynb | ruben69695/python-course | a3d3532279510fa0315a7636c373016c7abe4f0a | ["MIT"] | 145 | 2016-09-26T14:02:55.000Z | 2018-10-27T06:49:28.000Z | 31.377193 | 924 | 0.552278 |
[
[
[
"# Las excepciones\nSon bloques de código excepcionales que nos permiten continuar con la ejecución de un programa pese a que ocurra un error.\n### Siguiendo con el ejemplo de la lección anterior\nTeníamos el caso en que leíamos un número por teclado, pero el usuario no introducía un número:",
"_____no_output_____"
]
],
[
[
"n = float(input(\"Introduce un número: \"))\nm = 4\nprint(\"{}/{}={}\".format(n,m,n/m))",
"Introduce un número: aaa\n"
]
],
[
[
"### Creando la excepción - Bloques try y except\nPara prevenir el error, debemos poner el código propenso a error un bloque **try** y luego encadenaremos un bloque **except** para tratar la excepción:",
"_____no_output_____"
]
],
[
[
"try:\n n = float(input(\"Introduce un número: \"))\n m = 4\n print(\"{}/{}={}\".format(n,m,n/m))\nexcept:\n print(\"Ha ocurrido un error, introduce bien el número\")",
"Introduce un número: aaa\nHa ocurrido un error, introduce bien el número\n"
]
],
[
[
"#### Utilizando un while(true), podemos asegurárnos de que el usuario introduce bien el valor\nRepitiendo la lectura por teclado hasta que lo haga bien, y entonces rompemos el bucle con un break:",
"_____no_output_____"
]
],
[
[
"while(True):\n try:\n n = float(input(\"Introduce un número: \"))\n m = 4\n print(\"{}/{}={}\".format(n,m,n/m))\n break # Importante romper la iteración si todo ha salido bien\n except:\n print(\"Ha ocurrido un error, introduce bien el número\")",
"Introduce un número: aaa\nHa ocurrido un error, introduce bien el número\nIntroduce un número: sdsdsd\nHa ocurrido un error, introduce bien el número\nIntroduce un número: sdsdsd\nHa ocurrido un error, introduce bien el número\nIntroduce un número: sdsd\nHa ocurrido un error, introduce bien el número\nIntroduce un número: 10\n10.0/4=2.5\n"
]
],
[
[
"### Bloque else en excepciones\nEs posible encadenar un bloque else después del *except* para comprobar el caso en que **todo funcione correctamente** (no se ejecuta la excepción).\n\nEl bloque *else* es un buen momento para romper la iteración con *break* si todo funciona correctamente:",
"_____no_output_____"
]
],
[
[
"while(True):\n try:\n n = float(input(\"Introduce un número: \"))\n m = 4\n print(\"{}/{}={}\".format(n,m,n/m))\n except:\n print(\"Ha ocurrido un error, introduce bien el número\")\n else:\n print(\"Todo ha funcionado correctamente\")\n break # Importante romper la iteración si todo ha salido bien",
"Introduce un número: 10\n10.0/4=2.5\nTodo ha funcionado correctamente\n"
]
],
[
[
"### Bloque finally en excepciones\nPor último es posible utilizar un bloque *finally* que se ejecute al final del código, **ocurra o no ocurra un error**:",
"_____no_output_____"
]
],
[
[
"while(True):\n try:\n n = float(input(\"Introduce un número: \"))\n m = 4\n print(\"{}/{}={}\".format(n,m,n/m))\n except:\n print(\"Ha ocurrido un error, introduce bien el número\")\n else:\n print(\"Todo ha funcionado correctamente\")\n break # Importante romper la iteración si todo ha salido bien\n finally:\n print(\"Fin de la iteración\") # Siempre se ejecuta",
"Introduce un número: aaa\nHa ocurrido un error, introduce bien el número\nFin de la iteración\nIntroduce un número: 10\n10.0/4=2.5\nTodo ha funcionado correctamente\nFin de la iteración\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
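One natural extension of the lesson: a bare except also silences unrelated bugs. A minimal sketch catching only the ValueError that float() raises, with the same messages as above:

```python
while(True):
    try:
        n = float(input("Enter a number: "))
        m = 4
        print("{}/{}={}".format(n, m, n / m))
    except ValueError:  # only the error we expect from float()
        print("An error occurred, enter the number correctly")
    else:
        print("Everything worked correctly")
        break  # Important: break the loop if everything went well
    finally:
        print("End of the iteration")  # Always runs
```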
4a172002c28a677443a2ca7405ca83ed3ac9767d | 6,270 | ipynb | Jupyter Notebook | intro/concatenator-in progress.ipynb | ktakimoto/core-resources | c4874af43a961c9d60907ba1661a67091cdc27b1 | ["MIT"] | 7 | 2017-09-13T18:07:48.000Z | 2019-04-02T04:45:46.000Z | intro/concatenator-in progress.ipynb | ktakimoto/core-resources | c4874af43a961c9d60907ba1661a67091cdc27b1 | ["MIT"] | null | null | null | intro/concatenator-in progress.ipynb | ktakimoto/core-resources | c4874af43a961c9d60907ba1661a67091cdc27b1 | ["MIT"] | 23 | 2017-04-08T23:26:19.000Z | 2021-08-15T17:06:28.000Z | 38.231707 | 156 | 0.627432 |
[
[
[
"# put files you want as a list here\n# i.e. files = ['intro-tables.ipynb', 'intro-strings.ipynb']\nfiles = ['intro-tables.ipynb', 'intro-strings.ipynb', 'intro-matplotlib.ipynb']\n\n# put your intro ipynb file name here\n# i.e. file_output_name = \"example.ipynb\"\nfile_output_name = \"example.ipynb\"",
"_____no_output_____"
],
[
"!pip install datascience\n\nimport numpy as np\nfrom datascience import *",
"Requirement already satisfied: datascience in /Users/kellychen/anaconda/lib/python3.5/site-packages\nRequirement already satisfied: pytest in /Users/kellychen/anaconda/lib/python3.5/site-packages (from datascience)\nRequirement already satisfied: setuptools in /Users/kellychen/anaconda/lib/python3.5/site-packages/setuptools-27.2.0-py3.5.egg (from datascience)\nRequirement already satisfied: coveralls==0.5 in /Users/kellychen/anaconda/lib/python3.5/site-packages (from datascience)\nRequirement already satisfied: folium==0.1.5 in /Users/kellychen/anaconda/lib/python3.5/site-packages (from datascience)\nRequirement already satisfied: coverage==3.7.1 in /Users/kellychen/anaconda/lib/python3.5/site-packages (from datascience)\nRequirement already satisfied: sphinx in /Users/kellychen/anaconda/lib/python3.5/site-packages/Sphinx-1.4.6-py3.5.egg (from datascience)\nRequirement already satisfied: py>=1.4.29 in /Users/kellychen/anaconda/lib/python3.5/site-packages (from pytest->datascience)\nRequirement already satisfied: requests>=1.0.0 in /Users/kellychen/anaconda/lib/python3.5/site-packages (from coveralls==0.5->datascience)\nRequirement already satisfied: PyYAML>=3.10 in /Users/kellychen/anaconda/lib/python3.5/site-packages (from coveralls==0.5->datascience)\nRequirement already satisfied: docopt>=0.6.1 in /Users/kellychen/anaconda/lib/python3.5/site-packages (from coveralls==0.5->datascience)\nRequirement already satisfied: six>=1.5 in /Users/kellychen/anaconda/lib/python3.5/site-packages (from sphinx->datascience)\nRequirement already satisfied: Jinja2>=2.3 in /Users/kellychen/anaconda/lib/python3.5/site-packages (from sphinx->datascience)\nRequirement already satisfied: Pygments>=2.0 in /Users/kellychen/anaconda/lib/python3.5/site-packages (from sphinx->datascience)\nRequirement already satisfied: docutils>=0.11 in /Users/kellychen/anaconda/lib/python3.5/site-packages (from sphinx->datascience)\nRequirement already satisfied: snowballstemmer>=1.1 in /Users/kellychen/anaconda/lib/python3.5/site-packages (from sphinx->datascience)\nRequirement already satisfied: babel!=2.0,>=1.3 in /Users/kellychen/anaconda/lib/python3.5/site-packages (from sphinx->datascience)\nRequirement already satisfied: alabaster<0.8,>=0.7 in /Users/kellychen/anaconda/lib/python3.5/site-packages (from sphinx->datascience)\nRequirement already satisfied: imagesize in /Users/kellychen/anaconda/lib/python3.5/site-packages (from sphinx->datascience)\nRequirement already satisfied: MarkupSafe in /Users/kellychen/anaconda/lib/python3.5/site-packages (from Jinja2>=2.3->sphinx->datascience)\nRequirement already satisfied: pytz>=0a in /Users/kellychen/anaconda/lib/python3.5/site-packages (from babel!=2.0,>=1.3->sphinx->datascience)\n"
],
[
"files = make_array('control-intro.ipynb', 'functions-calling.ipynb',\n 'functions.ipynb', 'intro-malpotlib.ipynb',\n 'intro-matpotlib-sol.ipynb', 'intro-module-final.ipynb', \n 'intro-pandas.ipynb', 'intro-pandas-sol.ipynb', \n 'intro-strings.ipynb', 'intro-tables.ipynb', \n 'intro-tables-sol-updated.ipynb', 'Jupyter Notebook Intro.ipynb', \n 'scope.ipynb', 'variables-arithmetic-type.ipynb', \n 'variables-intro.ipynb', 'variables-type-intro.ipynb',\n 'what-is-programming-intro.ipynb')\nfile_keys = make_array('control', 'function call', 'functions')",
"_____no_output_____"
],
[
"import json",
"_____no_output_____"
],
[
"with open(files[0]) as json_data:\n first = json.load(json_data)\n\nfor file in files[1:]:\n with open(file) as json_data:\n x = json.load(json_data)\n first['cells'] = first['cells'] + x['cells']",
"_____no_output_____"
],
[
"with open(file_output_name, 'w') as outfile:\n json.dump(first, outfile)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
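Merging notebooks as raw JSON works, but skips schema validation. A sketch of the same concatenation with the nbformat library, assuming the notebooks are format version 4 and reusing files and file_output_name from the first cell:

```python
import nbformat

merged = nbformat.read(files[0], as_version=4)
for file in files[1:]:
    nb = nbformat.read(file, as_version=4)
    merged.cells.extend(nb.cells)  # append cells in order, like the JSON version

nbformat.write(merged, file_output_name)
```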
4a1730ca60316c5d9f3fb51bffae90bea978717d | 147,743 | ipynb | Jupyter Notebook | Notebooks/visualizations/visualizations_in_data.ipynb | sidneyarcidiacono/QL1.1 | ffa8ffeaac19e18b8ef4f8da3b9bd4aefaadd956 | ["MIT"] | null | null | null | Notebooks/visualizations/visualizations_in_data.ipynb | sidneyarcidiacono/QL1.1 | ffa8ffeaac19e18b8ef4f8da3b9bd4aefaadd956 | ["MIT"] | null | null | null | Notebooks/visualizations/visualizations_in_data.ipynb | sidneyarcidiacono/QL1.1 | ffa8ffeaac19e18b8ef4f8da3b9bd4aefaadd956 | ["MIT"] | null | null | null | 206.633566 | 23,192 | 0.77201 |
[
[
[
"# Visualizations in Data\n\nData visualization is the presentation of data in graphical format. Data visualization is both an art and a science as it combines creating visualizations that are both engaging and accurate. In matheimatical applications visualizations can help you better observe trends and patterns in data, or discribe large datasets in a concise way. In this lesson we will focus on some of the most common graphs used to visualize data and describe some tools in Python that can help you create these visualizations!",
"_____no_output_____"
],
[
"## Matplotlib\nTo create our visulizations in Python we will be using the matplotlib library, which will give us the tools to easily create graphs and customiza them. We will cover some of the matplotlib functionality in this lesson, but check out [this resource](https://heartbeat.fritz.ai/introduction-to-matplotlib-data-visualization-in-python-d9143287ae39) if you want some more introduction to how to use this library.",
"_____no_output_____"
],
[
"## Dataset\nIn this lesson we will be using the drinks file from the [Starbucks nutrition dataset](https://www.kaggle.com/starbucks/starbucks-menu). This dataset includes the nutritional information for Starbucks’ food and drink menu items. All nutritional information for drinks are for a 12oz serving size.",
"_____no_output_____"
],
[
"## Using and storing data\nData is often stored in comma separated value files or .csv files. For many interesting datasets to try in your projects check out [Kaggle](https://www.kaggle.com/). In this lesson we will be using the starbucks drinks csv file as the basis for our visualizations. In python there is a library called csv that makes handling csv files easier. Take a look at the example below to learn more about how to use this library. ",
"_____no_output_____"
]
],
[
[
"import csv #the csv library\nwith open('starbucks_drinkMenu_expanded.csv') as csvfile: #open the file\n #creates a csv reader object which stores the lines of the files in lists and lets us iterate over them\n drinksreader = csv.reader(csvfile) \n headers = next(drinksreader, None) #skip over the headers\n for row in drinksreader:\n print(row)#take a look at what is being printed out",
"['Coffee', 'Brewed Coffee', 'Short', '3', '0.1', '0', '0', '0', '5', '0', '0', '0', '0.3', '0%', '0%', '0%', '0%', '175']\n['Coffee', 'Brewed Coffee', 'Tall', '4', '0.1', '0', '0', '0', '10', '0', '0', '0', '0.5', '0%', '0%', '0%', '0%', '260']\n['Coffee', 'Brewed Coffee', 'Grande', '5', '0.1', '0', '0', '0', '10', '0', '0', '0', '1', '0%', '0%', '0%', '0%', '330']\n['Coffee', 'Brewed Coffee', 'Venti', '5', '0.1', '0', '0', '0', '10', '0', '0', '0', '1', '0%', '0%', '2%', '0%', '410']\n['Classic Espresso Drinks', 'Caffè Latte', 'Short Nonfat Milk', '70', '0.1', '0.1', '0', '5', '75', '10', '0', '9', '6', '10%', '0%', '20%', '0%', '75']\n['Classic Espresso Drinks', 'Caffè Latte', '2% Milk', '100', '3.5', '2', '0.1', '15', '85', '10', '0', '9', '6', '10%', '0%', '20%', '0%', '75']\n['Classic Espresso Drinks', 'Caffè Latte', 'Soymilk', '70', '2.5', '0.4', '0', '0', '65', '6', '1', '4', '5', '6%', '0%', '20%', '8%', '75']\n['Classic Espresso Drinks', 'Caffè Latte', 'Tall Nonfat Milk', '100', '0.2', '0.2', '0', '5', '120', '15', '0', '14', '10', '15%', '0%', '30%', '0%', '75']\n['Classic Espresso Drinks', 'Caffè Latte', '2% Milk', '150', '6', '3', '0.2', '25', '135', '15', '0', '14', '10', '15%', '0%', '30%', '0%', '75']\n['Classic Espresso Drinks', 'Caffè Latte', 'Soymilk', '110', '4.5', '0.5', '0', '0', '105', '10', '1', '6', '8', '10%', '0%', '30%', '15%', '75']\n['Classic Espresso Drinks', 'Caffè Latte', 'Grande Nonfat Milk', '130', '0.3', '0.2', '0', '5', '150', '19', '0', '18', '13', '20%', '0%', '40%', '0%', '150']\n['Classic Espresso Drinks', 'Caffè Latte', '2% Milk', '190', '7', '3.5', '0.2', '30', '170', '19', '0', '17', '12', '20%', '2%', '40%', '0%', '150']\n['Classic Espresso Drinks', 'Caffè Latte', 'Soymilk', '150', '5', '0.5', '0', '0', '130', '13', '1', '8', '10', '15%', '0%', '40%', '15%', '150']\n['Classic Espresso Drinks', 'Caffè Latte', 'Venti Nonfat Milk', '170', '0.4', '0.3', '0', '10', '190', '25', '0', '23', '16', '30%', '0%', '50%', '0%', '150']\n['Classic Espresso Drinks', 'Caffè Latte', '2% Milk', '240', '9', '4.5', '0.3', '35', '220', '24', '0', '22', '16', '25%', '2%', '50%', '0%', '150']\n['Classic Espresso Drinks', 'Caffè Latte', 'Soymilk', '190', '7', '1', '0', '0', '170', '16', '2', '11', '13', '20%', '0%', '50%', '25%', '150']\n['Classic Espresso Drinks', 'Caffè Mocha (Without Whipped Cream)', 'Short Nonfat Milk', '110', '1.5', '1', '0', '5', '60', '21', '1', '17', '7', '8%', '0%', '15%', '10%', '85']\n['Classic Espresso Drinks', 'Caffè Mocha (Without Whipped Cream)', '2% Milk', '130', '4', '2', '0.1', '10', '70', '21', '1', '17', '6', '8%', '0%', '15%', '10%', '85']\n['Classic Espresso Drinks', 'Caffè Mocha (Without Whipped Cream)', 'Soymilk', '110', '3.5', '1', '0', '0', '55', '19', '2', '13', '6', '6%', '0%', '15%', '20%', '85']\n['Classic Espresso Drinks', 'Caffè Mocha (Without Whipped Cream)', 'Tall Nonfat Milk', '170', '2', '1', '0', '5', '100', '32', '1', '27', '10', '15%', '0%', '25%', '20%', '95']\n['Classic Espresso Drinks', 'Caffè Mocha (Without Whipped Cream)', '2% Milk', '200', '6', '3.5', '0.1', '20', '115', '32', '1', '26', '10', '15%', '0%', '25%', '20%', '95']\n['Classic Espresso Drinks', 'Caffè Mocha (Without Whipped Cream)', 'Soymilk', '180', '5', '1.5', '0', '0', '85', '28', '2', '20', '9', '10%', '0%', '25%', '30%', '95']\n['Classic Espresso Drinks', 'Caffè Mocha (Without Whipped Cream)', 'Grande Nonfat Milk', '220', '2.5', '1.5', '0', '5', '125', '43', '2', '34', '13', '20%', '0%', '35%', '25%', '175']\n['Classic Espresso Drinks', 'Caffè 
Mocha (Without Whipped Cream)', '2% Milk', '260', '8', '4.5', '0.2', '25', '140', '42', '2', '34', '13', '15%', '2%', '35%', '25%', '175']\n['Classic Espresso Drinks', 'Caffè Mocha (Without Whipped Cream)', 'Soymilk', '230', '7', '2', '0', '0', '105', '37', '3', '26', '11', '10%', '0%', '35%', '40%', '175']\n['Classic Espresso Drinks', 'Caffè Mocha (Without Whipped Cream)', 'Venti Nonfat Milk', '280', '3', '2', '0', '10', '160', '54', '2', '44', '17', '25%', '0%', '45%', '30%', '180']\n['Classic Espresso Drinks', 'Caffè Mocha (Without Whipped Cream)', '2% Milk', '340', '11', '6', '0.2', '30', '180', '53', '2', '43', '17', '20%', '2%', '45%', '30%', '180']\n['Classic Espresso Drinks', 'Caffè Mocha (Without Whipped Cream)', 'Soymilk', '290', '9', '2.5', '0', '0', '140', '47', '4', '33', '14', '15%', '0%', '45%', '50%', '180']\n['Classic Espresso Drinks', 'Vanilla Latte (Or Other Flavoured Latte)', 'Short Nonfat Milk', '100', '0.1', '0.1', '0', '5', '70', '19', '0', '18', '6', '10%', '0%', '20%', '0%', '75']\n['Classic Espresso Drinks', 'Vanilla Latte (Or Other Flavoured Latte)', '2% Milk', '130', '3.5', '1.5', '0.1', '15', '80', '18', '0', '17', '6', '10%', '0%', '20%', '0%', '75']\n['Classic Espresso Drinks', 'Vanilla Latte (Or Other Flavoured Latte)', 'Soymilk', '110', '2.5', '0.3', '0', '0', '60', '16', '1', '13', '5', '6%', '0%', '20%', '8%', '75']\n['Classic Espresso Drinks', 'Vanilla Latte (Or Other Flavoured Latte)', 'Tall Nonfat Milk', '150', '0.2', '0.1', '0', '5', '110', '28', '0', '27', '9', '15%', '0%', '30%', '0%', '75']\n['Classic Espresso Drinks', 'Vanilla Latte (Or Other Flavoured Latte)', '2% Milk', '200', '5', '2.5', '0.2', '20', '125', '28', '0', '27', '9', '15%', '0%', '30%', '0%', '75']\n['Classic Espresso Drinks', 'Vanilla Latte (Or Other Flavoured Latte)', 'Soymilk', '160', '4', '0.5', '0', '0', '95', '23', '1', '20', '7', '10%', '0%', '30%', '15%', '75']\n['Classic Espresso Drinks', 'Vanilla Latte (Or Other Flavoured Latte)', 'Grande Nonfat Milk', '200', '0.3', '0.2', '0', '5', '140', '37', '0', '35', '12', '20%', '0%', '35%', '0%', '150']\n['Classic Espresso Drinks', 'Vanilla Latte (Or Other Flavoured Latte)', '2% Milk', '250', '6', '3.5', '0.2', '25', '150', '37', '0', '35', '12', '20%', '2%', '35%', '0%', '150']\n['Classic Espresso Drinks', 'Vanilla Latte (Or Other Flavoured Latte)', 'Soymilk', '210', '5', '0.5', '0', '0', '120', '31', '1', '26', '9', '10%', '0%', '35%', '15%', '150']\n['Classic Espresso Drinks', 'Vanilla Latte (Or Other Flavoured Latte)', 'Venti Nonfat Milk', '250', '0.3', '0.2', '0', '10', '180', '47', '0', '45', '15', '25%', '0%', '50%', '0%', '150']\n['Classic Espresso Drinks', 'Vanilla Latte (Or Other Flavoured Latte)', '2% Milk', '320', '9', '4.5', '0.3', '35', '200', '46', '0', '44', '15', '25%', '2%', '45%', '0%', '150']\n['Classic Espresso Drinks', 'Vanilla Latte (Or Other Flavoured Latte)', 'Soymilk', '270', '7', '1', '0', '0', '160', '39', '2', '33', '12', '15%', '0%', '45%', '20%', '150']\n['Classic Espresso Drinks', 'Caffè Americano', 'Short', '5', '0', '0', '0', '0', '5', '1', '0', '0', '0.4', '0%', '0%', '0%', '0%', '75']\n['Classic Espresso Drinks', 'Caffè Americano', 'Tall', '10', '0', '0', '0', '0', '10', '2', '0', '0', '1', '0%', '0%', '2%', '0%', '150']\n['Classic Espresso Drinks', 'Caffè Americano', 'Grande', '15', '0', '0', '0', '0', '15', '3', '0', '0', '1', '0%', '0%', '2%', '0%', '225']\n['Classic Espresso Drinks', 'Caffè Americano', 'Venti', '25', '0', '0', '0', '0', '15', '4', '0', '0', '1', '0%', '0%', '2%', '0%', 
'300']\n['Classic Espresso Drinks', 'Cappuccino', 'Short Nonfat Milk', '50', '0.1', '0.1', '0', '5', '60', '8', '0', '7', '5', '8%', '0%', '15%', '0%', '75']\n['Classic Espresso Drinks', 'Cappuccino', '2% Milk', '80', '3', '1.5', '0.1', '10', '70', '8', '0', '7', '5', '8%', '0%', '15%', '0%', '75']\n['Classic Espresso Drinks', 'Cappuccino', 'Soymilk', '50', '1.5', '0.2', '0', '0', '40', '4', '0', '3', '3', '4%', '0%', '10%', '6%', '75']\n['Classic Espresso Drinks', 'Cappuccino', 'Tall Nonfat Milk', '60', '0.1', '0.1', '0', '5', '70', '9', '0', '8', '6', '10%', '0%', '20%', '0%', '75']\n['Classic Espresso Drinks', 'Cappuccino', '2% Milk', '90', '3.5', '1.5', '0.1', '15', '80', '9', '0', '8', '6', '10%', '0%', '20%', '0%', '75']\n['Classic Espresso Drinks', 'Cappuccino', 'Soymilk', '70', '3', '0.4', '0', '0', '65', '7', '1', '4', '5', '6%', '0%', '20%', '8%', '75']\n['Classic Espresso Drinks', 'Cappuccino', 'Grande Nonfat Milk', '80', '0.2', '0.1', '0', '5', '90', '12', '0', '10', '8', '15%', '0%', '25%', '0%', '150']\n['Classic Espresso Drinks', 'Cappuccino', '2% Milk', '120', '4', '2', '0.1', '15', '100', '12', '0', '10', '8', '10%', '0%', '25%', '0%', '150']\n['Classic Espresso Drinks', 'Cappuccino', 'Soymilk', '100', '3.5', '0.4', '0', '0', '80', '9', '1', '5', '7', '8%', '0%', '25%', '10%', '150']\n['Classic Espresso Drinks', 'Cappuccino', 'Venti Nonfat Milk', '110', '0.2', '0.2', '0', '5', '120', '16', '0', '14', '10', '20%', '0%', '30%', '0%', '150']\n['Classic Espresso Drinks', 'Cappuccino', '2% Milk', '150', '6', '3', '0.2', '25', '135', '16', '0', '14', '10', '15%', '0%', '30%', '0%', '150']\n['Classic Espresso Drinks', 'Cappuccino', 'Soymilk', '120', '4.5', '0.5', '0', '0', '110', '11', '1', '7', '9', '10%', '0%', '35%', '15%', '150']\n['Classic Espresso Drinks', 'Espresso', 'Solo', '5', '0', '0', '0', '0', '0', '1', '0', '0', '0.4', '0%', '0%', '0%', '0%', '75']\n['Classic Espresso Drinks', 'Espresso', 'Doppio', '10', '0', '0', '0', '0', '1', '2', '0', '0', '1', '0%', '0%', '0%', '0%', '150']\n['Classic Espresso Drinks', 'Skinny Latte (Any Flavour)', 'Short Nonfat Milk', '60', '0.1', '0.1', '0', '5', '80', '9', '0', '8', '6', '10%', '0%', '20%', '0%', '75']\n['Classic Espresso Drinks', 'Skinny Latte (Any Flavour)', 'Tall Nonfat Milk', '100', '0.2', '0.1', '0', '5', '125', '14', '0', '13', '9', '15%', '0%', '30%', '0%', '75']\n['Classic Espresso Drinks', 'Skinny Latte (Any Flavour)', 'Grande Nonfat Milk', '120', '0.3', '0.2', '0', '5', '160', '18', '0', '16', '12', '20%', '0%', '35%', '0%', '150']\n['Classic Espresso Drinks', 'Skinny Latte (Any Flavour)', 'Venti Nonfat Milk', '160', '0.3', '0.2', '0', '10', '200', '24', '0', '21', '15', '25%', '0%', '50%', '0%', '150']\n['Signature Espresso Drinks', 'Caramel Macchiato', 'Short Nonfat Milk', '100', '1', '0.5', '0', '5', '70', '17', '0', '15', '6', '10%', '0%', '15%', '0%', '75']\n['Signature Espresso Drinks', 'Caramel Macchiato', '2% Milk', '120', '4', '2', '0.1', '15', '80', '16', '0', '15', '5', '10%', '0%', '15%', '0%', '75']\n['Signature Espresso Drinks', 'Caramel Macchiato', 'Soymilk', '100', '3', '1', '0', '0', '60', '14', '1', '11', '4', '6%', '0%', '15%', '8%', '75']\n['Signature Espresso Drinks', 'Caramel Macchiato', 'Tall Nonfat Milk', '140', '1', '0.5', '0', '5', '105', '25', '0', '24', '8', '15%', '0%', '25%', '0%', '75']\n['Signature Espresso Drinks', 'Caramel Macchiato', '2% Milk', '180', '5', '3', '0.1', '20', '115', '25', '0', '23', '8', '15%', '0%', '25%', '0%', '75']\n['Signature Espresso Drinks', 'Caramel 
Macchiato', 'Soymilk', '150', '4.5', '1', '0', '0', '90', '21', '1', '17', '7', '10%', '0%', '25%', '10%', '75']\n['Signature Espresso Drinks', 'Caramel Macchiato', 'Grande Nonfat Milk', '190', '1', '1', '0', '10', '130', '35', '0', '32', '11', '20%', '0%', '35%', '0%', '150']\n['Signature Espresso Drinks', 'Caramel Macchiato', '2% Milk', '240', '7', '3.5', '0.2', '25', '150', '34', '0', '32', '10', '15%', '2%', '30%', '0%', '150']\n['Signature Espresso Drinks', 'Caramel Macchiato', 'Soymilk', '200', '5', '1', '0', '5', '115', '29', '1', '24', '9', '10%', '0%', '35%', '15%', '150']\n['Signature Espresso Drinks', 'Caramel Macchiato', 'Venti Nonfat Milk', '240', '1', '1', '0', '10', '170', '43', '0', '41', '13', '25%', '0%', '40%', '0%', '150']\n['Signature Espresso Drinks', 'Caramel Macchiato', '2% Milk', '300', '8', '4.5', '0.2', '35', '180', '43', '0', '40', '13', '20%', '2%', '40%', '0%', '150']\n['Signature Espresso Drinks', 'Caramel Macchiato', 'Soymilk', '250', '7', '1.5', '0', '5', '140', '36', '1', '31', '11', '15%', '0%', '40%', '20%', '150']\n['Signature Espresso Drinks', 'White Chocolate Mocha (Without Whipped Cream)', 'Short Nonfat Milk', '180', '3', '2', '0', '5', '120', '31', '0', '29', '7', '10%', '0%', '25%', '0%', '75']\n['Signature Espresso Drinks', 'White Chocolate Mocha (Without Whipped Cream)', '2% Milk', '200', '6', '3.5', '0.1', '15', '125', '31', '0', '29', '7', '8%', '2%', '25%', '0%', '75']\n['Signature Espresso Drinks', 'White Chocolate Mocha (Without Whipped Cream)', 'Soymilk', '180', '5', '2.5', '0', '0', '110', '28', '1', '25', '6', '6%', '0%', '25%', '8%', '75']\n['Signature Espresso Drinks', 'White Chocolate Mocha (Without Whipped Cream)', 'Tall Nonfat Milk', '270', '4.5', '3.5', '0', '5', '190', '47', '0', '45', '12', '15%', '2%', '35%', '0%', '75']\n['Signature Espresso Drinks', 'White Chocolate Mocha (Without Whipped Cream)', '2% Milk', '310', '9', '6', '0.1', '20', '200', '46', '0', '44', '11', '15%', '2%', '35%', '0%', '75']\n['Signature Espresso Drinks', 'White Chocolate Mocha (Without Whipped Cream)', 'Soymilk', '280', '8', '3.5', '0', '0', '170', '42', '1', '39', '10', '10%', '2%', '35%', '10%', '75']\n['Signature Espresso Drinks', 'White Chocolate Mocha (Without Whipped Cream)', 'Grande Nonfat Milk', '350', '6', '4.5', '0', '10', '240', '61', '0', '58', '15', '20%', '2%', '45%', '2%', '150']\n['Signature Espresso Drinks', 'White Chocolate Mocha (Without Whipped Cream)', '2% Milk', '400', '11', '7', '0.2', '25', '250', '61', '0', '58', '15', '15%', '2%', '45%', '0%', '150']\n['Signature Espresso Drinks', 'White Chocolate Mocha (Without Whipped Cream)', 'Soymilk', '370', '10', '5', '0', '0', '220', '56', '1', '51', '13', '10%', '2%', '45%', '15%', '150']\n['Signature Espresso Drinks', 'White Chocolate Mocha (Without Whipped Cream)', 'Venti Nonfat Milk', '450', '7', '6', '0', '10', '310', '78', '0', '74', '19', '25%', '2%', '60%', '2%', '150']\n['Signature Espresso Drinks', 'White Chocolate Mocha (Without Whipped Cream)', '2% Milk', '510', '15', '9', '0.2', '35', '330', '77', '0', '74', '19', '20%', '4%', '60%', '2%', '150']\n['Signature Espresso Drinks', 'White Chocolate Mocha (Without Whipped Cream)', 'Soymilk', '460', '13', '6', '0', '5', '290', '70', '1', '64', '16', '15%', '2%', '60%', '20%', '150']\n['Signature Espresso Drinks', 'Hot Chocolate (Without Whipped Cream)', 'Short Nonfat Milk', '130', '1.5', '1', '0', '5', '70', '26', '1', '23', '7', '10%', '0%', '20%', '10%', '10']\n['Signature Espresso Drinks', 'Hot Chocolate (Without Whipped 
Cream)', '2% Milk', '150', '4.5', '2.5', '0.1', '15', '80', '26', '1', '22', '7', '10%', '0%', '20%', '10%', '10']\n['Signature Espresso Drinks', 'Hot Chocolate (Without Whipped Cream)', 'Soymilk', '130', '3.5', '1', '0', '0', '60', '23', '2', '18', '6', '6%', '0%', '20%', '20%', '10']\n['Signature Espresso Drinks', 'Hot Chocolate (Without Whipped Cream)', 'Tall Nonfat Milk', '190', '2', '1', '0', '5', '110', '37', '1', '32', '11', '15%', '0%', '30%', '20%', '20']\n['Signature Espresso Drinks', 'Hot Chocolate (Without Whipped Cream)', '2% Milk', '230', '7', '3.5', '0.2', '20', '120', '37', '1', '32', '10', '15%', '0%', '30%', '20%', '20']\n['Signature Espresso Drinks', 'Hot Chocolate (Without Whipped Cream)', 'Soymilk', '200', '6', '1.5', '0', '0', '95', '32', '2', '25', '9', '10%', '0%', '30%', '30%', '20']\n['Signature Espresso Drinks', 'Hot Chocolate (Without Whipped Cream)', 'Grande Nonfat Milk', '240', '2.5', '1.5', '0', '5', '140', '48', '2', '41', '14', '20%', '0%', '40%', '25%', '25']\n['Signature Espresso Drinks', 'Hot Chocolate (Without Whipped Cream)', '2% Milk', '290', '9', '5', '0.2', '25', '160', '47', '2', '41', '14', '20%', '2%', '40%', '25%', '25']\n['Signature Espresso Drinks', 'Hot Chocolate (Without Whipped Cream)', 'Soymilk', '250', '7', '2', '0', '0', '125', '41', '3', '32', '12', '15%', '0%', '40%', '40%', '25']\n['Signature Espresso Drinks', 'Hot Chocolate (Without Whipped Cream)', 'Venti Nonfat Milk', '320', '3', '2', '0', '10', '180', '63', '2', '55', '18', '25%', '0%', '50%', '30%', '30']\n['Signature Espresso Drinks', 'Hot Chocolate (Without Whipped Cream)', '2% Milk', '380', '11', '6', '0.3', '35', '200', '63', '2', '54', '17', '25%', '2%', '50%', '30%', '30']\n['Signature Espresso Drinks', 'Hot Chocolate (Without Whipped Cream)', 'Soymilk', '330', '9', '2.5', '0', '0', '160', '55', '4', '44', '15', '15%', '0%', '50%', '50%', '30']\n['Signature Espresso Drinks', 'Caramel Apple Spice (Without Whipped Cream)', 'Short', '140', '0', '0', '0', '0', '10', '35', '0', '33', '0', '0%', '0%', '0%', '0%', '0']\n['Signature Espresso Drinks', 'Caramel Apple Spice (Without Whipped Cream)', 'Tall', '210', '0', '0', '0', '0', '15', '53', '0', '49', '0', '0%', '0%', '0%', '0%', '0']\n['Signature Espresso Drinks', 'Caramel Apple Spice (Without Whipped Cream)', 'Grande', '280', '0', '0', '0', '0', '20', '70', '0', '65', '0', '0%', '0%', '0%', '0%', '0']\n['Signature Espresso Drinks', 'Caramel Apple Spice (Without Whipped Cream)', 'Venti', '360', '0', '0', '0', '0', '25', '89', '0', '83', '0', '0%', '0%', '0%', '0%', '0']\n['Tazo® Tea Drinks', 'Tazo® Tea', 'Short', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0%', '0%', '0%', '0%', 'Varies']\n['Tazo® Tea Drinks', 'Tazo® Tea', 'Tall', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0%', '0%', '0%', '0%', 'Varies']\n['Tazo® Tea Drinks', 'Tazo® Tea', 'Grande', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0%', '0%', '0%', '0%', 'Varies']\n['Tazo® Tea Drinks', 'Tazo® Tea', 'Venti', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0%', '0%', '0%', '0%', 'Varies']\n['Tazo® Tea Drinks', 'Tazo® Chai Tea Latte', 'Short Nonfat Milk', '100', '0.1', '0.1', '0', '0', '50', '22', '0', '21', '4', '6%', '0%', '10%', '0%', '50']\n['Tazo® Tea Drinks', 'Tazo® Chai Tea Latte', '2% Milk', '120', '2', '1', '0.1', '10', '55', '22', '0', '21', '4', '6%', '0%', '10%', '0%', '50']\n['Tazo® Tea Drinks', 'Tazo® Chai Tea Latte', 'Soymilk', '110', '1.5', '0.2', '0', '0', '45', '20', '0', '18', '3', '4%', '0%', '10%', '6%', '50']\n['Tazo® Tea 
Drinks', 'Tazo® Chai Tea Latte', 'Tall Nonfat Milk', '160', '0.2', '0.1', '0', '5', '80', '34', '0', '32', '6', '10%', '0%', '20%', '2%', '70']\n['Tazo® Tea Drinks', 'Tazo® Chai Tea Latte', '2% Milk', '190', '3.5', '1.5', '0.1', '15', '90', '34', '0', '32', '6', '10%', '0%', '20%', '2%', '70']\n['Tazo® Tea Drinks', 'Tazo® Chai Tea Latte', 'Soymilk', '160', '2.5', '0.3', '0', '0', '70', '31', '1', '28', '5', '6%', '0%', '20%', '10%', '70']\n['Tazo® Tea Drinks', 'Tazo® Chai Tea Latte', 'Grande Nonfat Milk', '210', '0.2', '0.1', '0', '5', '105', '45', '0', '43', '8', '15%', '0%', '25%', '2%', '95']\n['Tazo® Tea Drinks', 'Tazo® Chai Tea Latte', '2% Milk', '240', '4.5', '2', '0.1', '20', '115', '45', '0', '42', '8', '10%', '0%', '25%', '2%', '95']\n['Tazo® Tea Drinks', 'Tazo® Chai Tea Latte', 'Soymilk', '220', '3.5', '0.4', '0', '0', '90', '41', '1', '37', '6', '8%', '0%', '25%', '10%', '95']\n['Tazo® Tea Drinks', 'Tazo® Chai Tea Latte', 'Venti Nonfat Milk', '260', '0.3', '0.2', '0', '5', '135', '57', '0', '54', '10', '15%', '0%', '35%', '2%', '120']\n['Tazo® Tea Drinks', 'Tazo® Chai Tea Latte', '2% Milk', '310', '6', '3', '0.2', '25', '150', '56', '0', '53', '10', '15%', '2%', '30%', '2%', '120']\n['Tazo® Tea Drinks', 'Tazo® Chai Tea Latte', 'Soymilk', '280', '4.5', '0.5', '0', '0', '115', '51', '1', '46', '8', '10%', '0%', '30%', '15%', '120']\n['Tazo® Tea Drinks', 'Tazo® Green Tea Latte', 'Short Nonfat Milk', '130', '0.2', '0.1', '0', '5', '85', '26', '0', '25', '7', '10%', '4%', '20%', '2%', '25']\n['Tazo® Tea Drinks', 'Tazo® Green Tea Latte', '2% Milk', '170', '4', '2', '0.1', '15', '95', '26', '0', '25', '7', '10%', '6%', '20%', '2%', '25']\n['Tazo® Tea Drinks', 'Tazo® Green Tea Latte', 'Soymilk', '140', '3', '0.4', '0', '0', '70', '22', '1', '20', '6', '8%', '4%', '20%', '10%', '25']\n['Tazo® Tea Drinks', 'Tazo® Green Tea Latte', 'Tall Nonfat Milk', '210', '0.4', '0.2', '0', '5', '125', '42', '1', '41', '11', '20%', '10%', '35%', '4%', '55']\n['Tazo® Tea Drinks', 'Tazo® Green Tea Latte', '2% Milk', '260', '6', '3', '0.2', '25', '140', '41', '1', '40', '10', '15%', '10%', '35%', '4%', '55']\n['Tazo® Tea Drinks', 'Tazo® Green Tea Latte', 'Soymilk', '230', '4.5', '0.5', '0', '0', '110', '36', '2', '33', '9', '10%', '10%', '35%', '15%', '55']\n['Tazo® Tea Drinks', 'Tazo® Green Tea Latte', 'Grande Nonfat Milk', '290', '0.5', '0.2', '0', '10', '160', '57', '1', '56', '14', '25%', '15%', '45%', '4%', '80']\n['Tazo® Tea Drinks', 'Tazo® Green Tea Latte', '2% Milk', '350', '8', '4', '0.2', '30', '180', '57', '1', '55', '14', '20%', '15%', '45%', '4%', '80']\n['Tazo® Tea Drinks', 'Tazo® Green Tea Latte', 'Soymilk', '310', '6', '1', '0', '0', '140', '50', '3', '46', '11', '15%', '15%', '45%', '25%', '80']\n['Tazo® Tea Drinks', 'Tazo® Green Tea Latte', 'Venti Nonfat Milk', '370', '0.5', '0.3', '0', '10', '200', '73', '2', '71', '18', '30%', '20%', '60%', '6%', '110']\n['Tazo® Tea Drinks', 'Tazo® Green Tea Latte', '2% Milk', '450', '10', '5', '0.3', '40', '230', '72', '2', '71', '17', '25%', '20%', '50%', '6%', '110']\n['Tazo® Tea Drinks', 'Tazo® Green Tea Latte', 'Soymilk', '390', '8', '1', '0', '0', '180', '64', '4', '58', '14', '20%', '20%', '60%', '30%', '110']\n['Tazo® Tea Drinks', 'Tazo® Full-Leaf Tea Latte', 'Short Nonfat Milk', '80', '0.1', '0.1', '0', '0', '45', '16', '0', '16', '4', '6%', '0%', '10%', '0.00%', 'varies']\n['Tazo® Tea Drinks', 'Tazo® Full-Leaf Tea Latte', '2% Milk', '90', '2', '1', '0.1', '10', '50', '15', '0', '15', '3', '6%', '0%', '10%', '0.00%', 'varies']\n['Tazo® Tea 
Drinks', 'Tazo® Full-Leaf Tea Latte', 'Soymilk', '80', '1.5', '0.2', '0', '0', '40', '14', '0', '13', '3', '4%', '0%', '10%', '6.00%', 'varies']\n['Tazo® Tea Drinks', 'Tazo® Full-Leaf Tea Latte', 'Tall Nonfat Milk', '120', '0.1', '0.1', '0', '5', '65', '23', '0', '23', '5', '10%', '0%', '20%', '0.00%', 'varies']\n['Tazo® Tea Drinks', 'Tazo® Full-Leaf Tea Latte', '2% Milk', '140', '3', '1.5', '0.1', '15', '75', '23', '0', '23', '5', '8%', '0%', '15%', '0.00%', 'varies']\n['Tazo® Tea Drinks', 'Tazo® Full-Leaf Tea Latte', 'Soymilk', '130', '2.5', '0.3', '0', '0', '60', '21', '1', '19', '4', '6%', '0%', '20%', '8.00%', 'varies']\n['Tazo® Tea Drinks', 'Tazo® Full-Leaf Tea Latte', 'Grande Nonfat Milk', '150', '0.2', '0.1', '0', '5', '85', '31', '0', '31', '7', '15%', '0%', '25%', '0.00%', 'varies']\n['Tazo® Tea Drinks', 'Tazo® Full-Leaf Tea Latte', '2% Milk', '190', '4', '2', '0.1', '15', '95', '31', '0', '30', '7', '10%', '0%', '25%', '0.00%', 'varies']\n['Tazo® Tea Drinks', 'Tazo® Full-Leaf Tea Latte', 'Soymilk', '170', '3.5', '0.4', '0', '0', '80', '27', '1', '25', '6', '8%', '0%', '25%', '10.00%', 'varies']\n['Tazo® Tea Drinks', 'Tazo® Full-Leaf Tea Latte', 'Venti Nonfat Milk', '190', '0.2', '0.1', '0', '5', '110', '39', '0', '39', '9', '15%', '0%', '30%', '0.00%', 'varies']\n['Tazo® Tea Drinks', 'Tazo® Full-Leaf Tea Latte', '2% Milk', '230', '5', '2.5', '0.2', '20', '125', '38', '0', '38', '9', '15%', '0%', '30%', '0.00%', 'varies']\n['Tazo® Tea Drinks', 'Tazo® Full-Leaf Tea Latte', 'Soymilk', '210', '4', '0.5', '0', '0', '100', '34', '1', '32', '7', '10%', '0%', '30%', '15.00%', 'varies']\n['Tazo® Tea Drinks', 'Tazo® Full-Leaf Red Tea Latte (Vanilla Rooibos)', 'Short Nonfat Milk', '80', '0.1', '0.1', '0', '0', '45', '16', '0', '16', '4', '6%', '0%', '10%', '0%', '0']\n['Tazo® Tea Drinks', 'Tazo® Full-Leaf Red Tea Latte (Vanilla Rooibos)', '2% Milk', '90', '2', '1', '0.1', '10', '50', '15', '0', '15', '3', '6%', '0%', '10%', '0%', '0']\n['Tazo® Tea Drinks', 'Tazo® Full-Leaf Red Tea Latte (Vanilla Rooibos)', 'Soymilk', '80', '1.5', '0.2', '0', '0', '40', '14', '0', '13', '3', '4%', '0%', '10%', '6%', '0']\n['Tazo® Tea Drinks', 'Tazo® Full-Leaf Red Tea Latte (Vanilla Rooibos)', 'Tall Nonfat Milk', '120', '0.1', '0.1', '0', '5', '65', '23', '0', '23', '5', '10%', '0%', '20%', '0%', '0']\n['Tazo® Tea Drinks', 'Tazo® Full-Leaf Red Tea Latte (Vanilla Rooibos)', '2% Milk', '140', '3', '1.5', '0.1', '15', '75', '23', '0', '23', '5', '8%', '0%', '15%', '0%', '0']\n['Tazo® Tea Drinks', 'Tazo® Full-Leaf Red Tea Latte (Vanilla Rooibos)', 'Soymilk', '130', '2.5', '0.3', '0', '0', '60', '21', '1', '19', '4', '6%', '0%', '20%', '8%', '0']\n['Tazo® Tea Drinks', 'Tazo® Full-Leaf Red Tea Latte (Vanilla Rooibos)', 'Grande Nonfat Milk', '150', '0.2', '0.1', '0', '5', '85', '31', '0', '31', '7', '15%', '0%', '25%', '0%', '0']\n['Tazo® Tea Drinks', 'Tazo® Full-Leaf Red Tea Latte (Vanilla Rooibos)', '2% Milk', '190', '4', '2', '0.1', '15', '95', '31', '0', '30', '7', '10%', '0%', '25%', '0%', '0']\n['Tazo® Tea Drinks', 'Tazo® Full-Leaf Red Tea Latte (Vanilla Rooibos)', 'Soymilk', '170', '3.5', '0.4', '0', '0', '80', '27', '1', '25', '6', '8%', '0%', '25%', '10%', '0']\n['Tazo® Tea Drinks', 'Tazo® Full-Leaf Red Tea Latte (Vanilla Rooibos)', 'Venti Nonfat Milk', '190', '0.2', '0.1', '0', '5', '110', '39', '0', '39', '9', '15%', '0%', '30%', '0%', '0']\n['Tazo® Tea Drinks', 'Tazo® Full-Leaf Red Tea Latte (Vanilla Rooibos)', '2% Milk', '230', '5', '2.5', '0.2', '20', '125', '38', '0', '38', '9', '15%', '0%', '30%', 
'0%', '0']\n['Tazo® Tea Drinks', 'Tazo® Full-Leaf Red Tea Latte (Vanilla Rooibos)', 'Soymilk', '210', '4', '0.5', '0', '0', '100', '34', '1', '32', '7', '10%', '0%', '30%', '15%', '0']\n['Shaken Iced Beverages', 'Iced Brewed Coffee (With Classic Syrup)', 'Tall', '60', '0', '0', '0', '0', '4', '15', '0', '15', '0.2', '0%', '0%', '0%', '0%', '120']\n['Shaken Iced Beverages', 'Iced Brewed Coffee (With Classic Syrup)', 'Grande', '90', '0.1', '0', '0', '0', '5', '21', '0', '21', '0.3', '0%', '0%', '0%', '0%', '165']\n['Shaken Iced Beverages', 'Iced Brewed Coffee (With Classic Syrup)', 'Venti', '130', '0.1', '0', '0', '0', '5', '31', '0', '31', '0.4', '0%', '0%', '0%', '0%', '235']\n['Shaken Iced Beverages', 'Iced Brewed Coffee (With Milk & Classic Syrup)', 'Tall Nonfat Milk', '80', '0.1', '0', '0', '0', '25', '18', '0', '18', '2', '2%', '0%', '6%', '0%', '90']\n['Shaken Iced Beverages', 'Iced Brewed Coffee (With Milk & Classic Syrup)', '2% Milk', '90', '1', '0.5', '0', '5', '25', '18', '0', '18', '2', '2%', '0%', '6%', '0.00%', '']\n['Shaken Iced Beverages', 'Iced Brewed Coffee (With Milk & Classic Syrup)', 'Soymilk', '80', '1', '0.1', '0', '0', '20', '17', '0', '17', '2', '2%', '0%', '6%', '0%', '90']\n['Shaken Iced Beverages', 'Iced Brewed Coffee (With Milk & Classic Syrup)', 'Grande Nonfat Milk', '110', '0.1', '0', '0', '0', '30', '24', '0', '24', '2', '4%', '0%', '8%', '2%', '90']\n['Shaken Iced Beverages', 'Iced Brewed Coffee (With Milk & Classic Syrup)', '2% Milk', '120', '1.5', '0.5', '0', '5', '35', '24', '0', '24', '2', '4%', '0%', '8%', '0%', '125']\n['Shaken Iced Beverages', 'Iced Brewed Coffee (With Milk & Classic Syrup)', 'Soymilk', '110', '1', '0.1', '0', '0', '30', '23', '0', '22', '2', '2%', '0%', '8%', '0%', '125']\n['Shaken Iced Beverages', 'Iced Brewed Coffee (With Milk & Classic Syrup)', 'Venti Nonfat Milk', '160', '0.1', '0.1', '0', '0', '50', '36', '0', '36', '4', '6%', '0%', '10%', '4%', '125']\n['Shaken Iced Beverages', 'Iced Brewed Coffee (With Milk & Classic Syrup)', '2% Milk', '180', '2', '1', '0.1', '10', '55', '36', '0', '36', '4', '6%', '0%', '10%', '0%', '170']\n['Shaken Iced Beverages', 'Iced Brewed Coffee (With Milk & Classic Syrup)', 'Soymilk', '170', '1.5', '0.2', '0', '0', '45', '34', '0', '33', '3', '4%', '0%', '10%', '0%', '170']\n['Shaken Iced Beverages', 'Shaken Iced Tazo® Tea (With Classic Syrup)', 'Tall', '60', '0', '0', '0', '0', '0', '15', '0', '15', '0', '0%', '0%', '0%', '6%', '170']\n['Shaken Iced Beverages', 'Shaken Iced Tazo® Tea (With Classic Syrup)', 'Grande', '80', '0', '0', '0', '0', '0', '21', '0', '21', '0', '0%', '0%', '0%', '0%', 'Varies']\n['Shaken Iced Beverages', 'Shaken Iced Tazo® Tea (With Classic Syrup)', 'Venti', '120', '0', '0', '0', '0', '0', '31', '0', '31', '0', '0%', '0%', '0%', '0%', 'Varies']\n['Shaken Iced Beverages', 'Shaken Iced Tazo® Tea Lemonade (With Classic Syrup)', 'Tall', '100', '0', '0', '0', '0', '0', '25', '0', '24', '0.1', '0%', '10%', '0%', '0%', 'Varies']\n['Shaken Iced Beverages', 'Shaken Iced Tazo® Tea Lemonade (With Classic Syrup)', 'Grande', '130', '0', '0', '0', '0', '0', '33', '0', '33', '0.1', '0%', '15%', '0%', '0%', 'Varies']\n['Shaken Iced Beverages', 'Shaken Iced Tazo® Tea Lemonade (With Classic Syrup)', 'Venti', '190', '0', '0', '0', '0', '0', '49', '0', '49', '0.1', '0%', '20%', '0%', '0%', 'Varies']\n['Smoothies', 'Banana Chocolate Smoothie', 'Grande Nonfat Milk', '280', '2.5', '1.5', '0', '5', '150', '53', '7', '34', '20', '10%', '15%', '20%', '0%', 'Varies']\n['Smoothies', 'Banana Chocolate 
Smoothie', '2% Milk', '300', '5', '2.5', '0.1', '15', '160', '53', '7', '34', '20', '8%', '15%', '20%', '20%', '15']\n['Smoothies', 'Banana Chocolate Smoothie', 'Soymilk', '290', '4.5', '1.5', '0', '5', '150', '51', '7', '31', '19', '6%', '15%', '20%', '20%', '15']\n['Smoothies', 'Orange Mango Banana Smoothie', 'Grande Nonfat Milk', '260', '1', '0.3', '0', '5', '120', '54', '6', '37', '16', '50%', '80%', '10%', '30%', '15']\n['Smoothies', 'Orange Mango Banana Smoothie', '2% Milk', '270', '1.5', '0.5', '0', '5', '125', '53', '6', '37', '16', '50%', '80%', '10%', '6%', '0']\n['Smoothies', 'Orange Mango Banana Smoothie', 'Soymilk', '270', '1.5', '0.4', '0', '5', '120', '53', '6', '36', '15', '50%', '80%', '10%', '6%', '0']\n['Smoothies', 'Strawberry Banana Smoothie', 'Grande Nonfat Milk', '290', '1', '0.3', '0', '5', '125', '58', '7', '41', '16', '4%', '100%', '10%', '8%', '0']\n['Smoothies', 'Strawberry Banana Smoothie', '2% Milk', '290', '2', '1', '0', '5', '125', '58', '7', '41', '16', '4%', '100%', '10%', '8%', '0']\n['Smoothies', 'Strawberry Banana Smoothie', 'Soymilk', '290', '2', '0.4', '0', '5', '120', '58', '8', '40', '16', '2%', '100%', '10%', '8%', '0']\n['Frappuccino® Blended Coffee', 'Coffee', 'Tall Nonfat Milk', '160', '0.1', '0', '0', '0', '160', '36', '0', '36', '3', '4%', '0%', '10%', '10%', '0']\n['Frappuccino® Blended Coffee', 'Coffee', 'Whole Milk', '180', '2.5', '1.5', '0.1', '10', '160', '36', '0', '36', '3', '4%', '0%', '8%', '0%', '70']\n['Frappuccino® Blended Coffee', 'Coffee', 'Soymilk', '160', '1.5', '0.2', '0', '0', '150', '35', '0', '34', '2', '4%', '0%', '10%', '0%', '70']\n['Frappuccino® Blended Coffee', 'Coffee', 'Grande Nonfat Milk', '220', '0.1', '0.1', '0', '0', '210', '51', '0', '50', '4', '6%', '0%', '10%', '4%', '70']\n['Frappuccino® Blended Coffee', 'Coffee', 'Whole Milk', '240', '3', '2', '0.1', '10', '220', '50', '0', '50', '3', '4%', '0%', '10%', '0%', '95']\n['Frappuccino® Blended Coffee', 'Coffee', 'Soymilk', '220', '1.5', '0.2', '0', '0', '210', '49', '0', '47', '3', '4%', '0%', '10%', '0%', '95']\n['Frappuccino® Blended Coffee', 'Coffee', 'Venti Nonfat Milk', '310', '0.1', '0.1', '0', '5', '300', '70', '0', '69', '6', '10%', '0%', '20%', '6%', '95']\n['Frappuccino® Blended Coffee', 'Coffee', 'Whole Milk', '350', '5', '3', '0.2', '15', '300', '70', '0', '69', '5', '8%', '0%', '15%', '2%', '130']\n['Frappuccino® Blended Coffee', 'Coffee', 'Soymilk', '310', '2.5', '0.3', '0', '0', '300', '68', '1', '66', '5', '6%', '0%', '20%', '2%', '130']\n['Frappuccino® Blended Coffee', 'Mocha (Without Whipped Cream)', 'Tall Nonfat Milk', '180', '0.5', '0.4', '0', '0', '150', '42', '1', '40', '3', '4%', '0%', '10%', '8%', '130']\n['Frappuccino® Blended Coffee', 'Mocha (Without Whipped Cream)', 'Whole Milk', '200', '3', '2', '0.1', '10', '160', '42', '1', '40', '3', '4%', '0%', '8%', '6%', '70']\n['Frappuccino® Blended Coffee', 'Mocha (Without Whipped Cream)', 'Soymilk', '180', '2', '0.5', '0', '0', '150', '40', '1', '38', '3', '4%', '0%', '10%', '10%', '70']\n['Frappuccino® Blended Coffee', 'Mocha (Without Whipped Cream)', 'Grande Nonfat Milk', '260', '1', '0.5', '0', '0', '220', '61', '1', '58', '4', '6%', '0%', '10%', '8%', '110']\n['Frappuccino® Blended Coffee', 'Mocha (Without Whipped Cream)', 'Whole Milk', '290', '4', '2.5', '0.1', '10', '220', '61', '1', '58', '4', '4%', '0%', '10%', '8%', '110']\n['Frappuccino® Blended Coffee', 'Mocha (Without Whipped Cream)', 'Soymilk', '260', '2.5', '0.5', '0', '0', '220', '59', '1', '55', '4', '4%', '0%', '10%', '15%', 
'110']\n['Frappuccino® Blended Coffee', 'Mocha (Without Whipped Cream)', 'Venti Nonfat Milk', '340', '1', '0.5', '0', '5', '300', '80', '1', '76', '7', '10%', '0%', '15%', '10%', '140']\n['Frappuccino® Blended Coffee', 'Mocha (Without Whipped Cream)', 'Whole Milk', '390', '6', '3.5', '0.1', '15', '300', '80', '1', '76', '6', '8%', '0%', '15%', '10%', '140']\n['Frappuccino® Blended Coffee', 'Mocha (Without Whipped Cream)', 'Soymilk', '350', '3', '1', '0', '0', '290', '78', '2', '73', '6', '6%', '0%', '20%', '20%', '140']\n['Frappuccino® Blended Coffee', 'Caramel (Without Whipped Cream)', 'Tall Nonfat Milk', '180', '0.1', '0', '0', '0', '160', '42', '0', '41', '3', '4%', '0%', '10%', '0%', '70']\n['Frappuccino® Blended Coffee', 'Caramel (Without Whipped Cream)', 'Whole Milk', '200', '2.5', '1.5', '0.1', '10', '160', '42', '0', '41', '3', '4%', '0%', '8%', '0%', '70']\n['Frappuccino® Blended Coffee', 'Caramel (Without Whipped Cream)', 'Soymilk', '180', '1.5', '0.2', '0', '0', '150', '40', '0', '39', '2', '4%', '0%', '10%', '4%', '70']\n['Frappuccino® Blended Coffee', 'Caramel (Without Whipped Cream)', 'Grande Nonfat Milk', '280', '0.1', '0.1', '0', '0', '220', '60', '0', '59', '4', '6%', '0%', '10%', '0%', '100']\n['Frappuccino® Blended Coffee', 'Caramel (Without Whipped Cream)', 'Whole Milk', '280', '3.5', '2', '0.1', '10', '220', '60', '0', '59', '3', '4%', '0%', '10%', '0%', '100']\n['Frappuccino® Blended Coffee', 'Caramel (Without Whipped Cream)', 'Soymilk', '260', '1.5', '0.2', '0', '0', '220', '58', '0', '56', '3', '4%', '0%', '10%', '6%', '100']\n['Frappuccino® Blended Coffee', 'Caramel (Without Whipped Cream)', 'Venti Nonfat Milk', '330', '0.1', '0.1', '0', '5', '290', '78', '0', '77', '5', '10%', '0%', '15%', '2%', '130']\n['Frappuccino® Blended Coffee', 'Caramel (Without Whipped Cream)', 'Whole Milk', '370', '5', '3', '0.1', '15', '300', '78', '0', '77', '5', '8%', '0%', '15%', '2%', '130']\n['Frappuccino® Blended Coffee', 'Caramel (Without Whipped Cream)', 'Soymilk', '340', '2.5', '0.3', '0', '0', '290', '75', '1', '73', '5', '6%', '0%', '20%', '8%', '130']\n['Frappuccino® Blended Coffee', 'Java Chip (Without Whipped Cream)', 'Tall Nonfat Milk', '220', '3', '2', '0', '0', '170', '47', '1', '44', '4', '4%', '0%', '10%', '20%', '75']\n['Frappuccino® Blended Coffee', 'Java Chip (Without Whipped Cream)', 'Whole Milk', '240', '5', '3.5', '0.1', '10', '170', '47', '1', '44', '4', '4%', '0%', '8%', '20%', '75']\n['Frappuccino® Blended Coffee', 'Java Chip (Without Whipped Cream)', 'Soymilk', '220', '4', '2.5', '0', '0', '170', '45', '1', '42', '3', '4%', '0%', '10%', '20%', '75']\n['Frappuccino® Blended Coffee', 'Java Chip (Without Whipped Cream)', 'Grande Nonfat Milk', '310', '4', '3', '0', '0', '250', '67', '2', '62', '5', '6%', '0%', '10%', '25%', '110']\n['Frappuccino® Blended Coffee', 'Java Chip (Without Whipped Cream)', 'Whole Milk', '340', '7', '5', '0.1', '10', '250', '67', '2', '62', '5', '4%', '0%', '10%', '25%', '110']\n['Frappuccino® Blended Coffee', 'Java Chip (Without Whipped Cream)', 'Soymilk', '310', '6', '3.5', '0', '0', '240', '65', '2', '60', '4', '4%', '0%', '10%', '30%', '110']\n['Frappuccino® Blended Coffee', 'Java Chip (Without Whipped Cream)', 'Venti Nonfat Milk', '420', '5', '4', '0', '5', '340', '90', '2', '84', '7', '10%', '0%', '20%', '35%', '145']\n['Frappuccino® Blended Coffee', 'Java Chip (Without Whipped Cream)', 'Whole Milk', '460', '10', '7', '0.2', '15', '340', '90', '2', '84', '7', '6%', '0%', '15%', '35%', '145']\n['Frappuccino® Blended Coffee', 'Java 
Chip (Without Whipped Cream)', 'Soymilk', '430', '8', '4.5', '0', '0', '330', '88', '3', '80', '6', '6%', '0%', '20%', '40%', '145']\n['Frappuccino® Light Blended Coffee', 'Coffee', 'Tall Nonfat Milk', '90', '0.1', '0', '0', '0', '160', '20', '0', '19', '3', '4%', '0%', '10%', '0%', '70']\n['Frappuccino® Light Blended Coffee', 'Coffee', 'Grande Nonfat Milk', '120', '0.1', '0.1', '0', '0', '210', '26', '0', '26', '3', '6%', '0%', '10%', '0%', '95']\n['Frappuccino® Light Blended Coffee', 'Coffee', 'Venti Nonfat Milk', '160', '0.1', '0.1', '0', '5', '270', '34', '0', '33', '5', '8%', '0%', '15%', '2%', '120']\n['Frappuccino® Light Blended Coffee', 'Mocha', 'Tall Nonfat Milk', '110', '0.5', '0.4', '0', '0', '150', '24', '1', '23', '3', '4%', '0%', '8%', '6%', '70']\n['Frappuccino® Light Blended Coffee', 'Mocha', 'Grande Nonfat Milk', '150', '1', '0.5', '0', '0', '200', '33', '1', '30', '4', '6%', '0%', '10%', '8%', '95']\n['Frappuccino® Light Blended Coffee', 'Mocha', 'Venti Nonfat Milk', '210', '1', '0.5', '0', '5', '280', '46', '1', '42', '6', '8%', '0%', '15%', '10%', '130']\n['Frappuccino® Light Blended Coffee', 'Caramel', 'Tall Nonfat Milk', '100', '0.1', '0', '0', '0', '140', '23', '0', '23', '3', '4%', '0%', '8%', '0%', '65']\n['Frappuccino® Light Blended Coffee', 'Caramel', 'Grande Nonfat Milk', '150', '0.1', '0.1', '0', '0', '200', '33', '0', '32', '3', '6%', '0%', '10%', '0%', '90']\n['Frappuccino® Light Blended Coffee', 'Caramel', 'Venti Nonfat Milk', '200', '0.1', '0.1', '0', '5', '270', '44', '0', '43', '5', '8%', '0%', '15%', '2%', '120']\n['Frappuccino® Light Blended Coffee', 'Java Chip', 'Tall Nonfat Milk', '150', '3', '2', '0', '0', '170', '30', '1', '27', '4', '4%', '0%', '10%', '20%', '70']\n['Frappuccino® Light Blended Coffee', 'Java Chip', 'Grande Nonfat Milk', '220', '4', '3', '0', '0', '240', '43', '2', '39', '5', '6%', '0%', '10%', '25%', '105']\n['Frappuccino® Light Blended Coffee', 'Java Chip', 'Venti Nonfat Milk', '290', '5', '4', '0', '5', '320', '58', '2', '52', '7', '8%', '0%', '15%', '35%', '165']\n['Frappuccino® Blended Crème', 'Strawberries & Crème (Without Whipped Cream)', 'Tall Nonfat Milk', '170', '0.1', '0.1', '0', '0', '140', '39', '0', '38', '3', '6%', '6%', '10%', '2%', '0']\n['Frappuccino® Blended Crème', 'Strawberries & Crème (Without Whipped Cream)', 'Whole Milk', '190', '3', '1.5', '0.1', '10', '140', '38', '0', '37', '3', '4%', '6%', '10%', '2%', '0']\n['Frappuccino® Blended Crème', 'Strawberries & Crème (Without Whipped Cream)', 'Soymilk', '170', '1.5', '0.2', '0', '0', '135', '37', '1', '35', '3', '4%', '6%', '10%', '6%', '0']\n['Frappuccino® Blended Crème', 'Strawberries & Crème (Without Whipped Cream)', 'Grande Nonfat Milk', '230', '0.2', '0.1', '0', '0', '190', '53', '0', '52', '4', '8%', '6%', '15%', '4%', '0']\n['Frappuccino® Blended Crème', 'Strawberries & Crème (Without Whipped Cream)', 'Whole Milk', '260', '4', '2', '0.1', '10', '190', '53', '0', '52', '4', '6%', '6%', '15%', '4%', '0']\n['Frappuccino® Blended Crème', 'Strawberries & Crème (Without Whipped Cream)', 'Soymilk', '240', '2', '0.2', '0', '0', '180', '51', '1', '49', '3', '4%', '6%', '15%', '8%', '0']\n['Frappuccino® Blended Crème', 'Strawberries & Crème (Without Whipped Cream)', 'Venti Nonfat Milk', '310', '0.2', '0.1', '0', '5', '260', '70', '0', '69', '6', '10%', '8%', '20%', '4%', '0']\n['Frappuccino® Blended Crème', 'Strawberries & Crème (Without Whipped Cream)', 'Whole Milk', '350', '6', '3', '0.2', '15', '260', '70', '0', '68', '6', '8%', '8%', '20%', '4%', 
'0']\n['Frappuccino® Blended Crème', 'Strawberries & Crème (Without Whipped Cream)', 'Soymilk', '320', '3.2', '0.4', '0', '0', '250', '67', '1', '64', '5', '6%', '8%', '20%', '10%', '0']\n['Frappuccino® Blended Crème', 'Vanilla Bean (Without Whipped Cream)', 'Tall Nonfat Milk', '170', '0.1', '0.1', '0', '0', '160', '39', '0', '38', '4', '6%', '0%', '10%', '0%', '0']\n['Frappuccino® Blended Crème', 'Vanilla Bean (Without Whipped Cream)', 'Whole Milk', '200', '3.5', '2', '0.1', '10', '160', '39', '0', '38', '3', '6%', '0%', '10%', '0%', '0']\n['Frappuccino® Blended Crème', 'Vanilla Bean (Without Whipped Cream)', 'Soymilk', '180', '1.5', '0.2', '0', '0', '160', '37', '1', '35', '3', '4%', '0%', '10%', '6%', '0']\n['Frappuccino® Blended Crème', 'Vanilla Bean (Without Whipped Cream)', 'Grande Nonfat Milk', '240', '0.1', '0.1', '0', '5', '230', '56', '0', '55', '5', '8%', '0%', '15%', '0%', '0']\n"
]
],
[
[
"## Bar Chart\n\nLet's start by creating a visualization that you might already be familiar with: a bar chart. Bar charts are used to show comparisons between categories of data. A bar chart will have two axis, one will typically be numerical values while the other will be some sort of category. There are two types of bar charts: vertical and horizontal. Let's looks at some examples of how to create a bar chart using our dataset!",
"_____no_output_____"
],
[
"In this example let's compare the sugar content of different types of drinks (lattes, mochas, and teas) using our dataset. Here are the steps we are going to perform to create this visualization: \n\n1. Read in the data\n2. Extract the headers \n3. Find the index which corresponds to the beverage category and grams of sugar \n4. Filter for the types of drinks we are interested in (lattes, mochas, and teas)\n5. Store in a list\n6. Average the amount of sugar per type\n7. Use matplotlib to build a bar chart\n\nThe first axis of our bar chart will be the beverage type, the second will be the the average sugar content in grams. More on bar charts in matplotlib [here](https://pythonspot.com/matplotlib-bar-chart/)",
"_____no_output_____"
]
],
[
[
"import csv #the csv library\nimport matplotlib.pyplot as plt #The visualization library\nimport numpy as np #provides math functions\n\nwith open('starbucks_drinkMenu_expanded.csv') as csvfile: #open the file\n #creates a csv reader object which stores the lines of the files in lists and lets us iterate over them\n drinksreader = csv.reader(csvfile) \n headers = next(drinksreader, None) #skip over the headers\n \n #get the index that corresponds to the information we are interested in\n drink_category_index = headers.index(\"Beverage\")\n sugars_index = headers.index(\" Sugars (g)\")\n \n #This is where we will store the sugar info for our different beverage types\n sugar_in_lattes = []\n sugar_in_teas = []\n sugar_in_mochas = []\n \n \n for row in drinksreader:\n drink_category = row[drink_category_index]\n sugar_grams = row[sugars_index]\n if 'Latte' in drink_category:\n sugar_in_lattes.append(float(sugar_grams))\n if 'Tea' in drink_category:\n sugar_in_teas.append(float(sugar_grams))\n if 'Mocha' in drink_category:\n sugar_in_mochas.append(float(sugar_grams))\n \n \n beverage_categories = [\"Latte\", 'Tea', 'Mocha']\n #average the sugar content\n average_sugar_in_lattes = np.mean(sugar_in_lattes)\n average_sugar_in_teas = np.mean(sugar_in_teas)\n average_sugar_in_mochas = np.mean(sugar_in_mochas)\n \n average_sugars = [average_sugar_in_lattes, average_sugar_in_teas, average_sugar_in_mochas]\n \n vertical_bar_chart_figure = plt.figure() #The outer container \n vertical_bar_chart_axes = vertical_bar_chart_figure.add_axes([0.1, 0.2, 0.8, 0.9]) #The actual chart inside the figure\n #For more explanation: https://heartbeat.fritz.ai/introduction-to-matplotlib-data-visualization-in-python-d9143287ae39\n \n #Create the bar chart using the bar() method\n #The color argument lets us specify a list of colors for each of the bars\n vertical_bar_chart_axes.bar(beverage_categories, average_sugars, color=[\"pink\", \"blue\", \"green\"])\n \n #Let's customize our chart!\n \n #Give it a title\n vertical_bar_chart_axes.set_title('Vertical bar chart of average sugar in grams for different types of beverages on the Starbucks menu')\n \n #Always label your axis or no one will be able to understand what the chart is showing\n vertical_bar_chart_axes.set_ylabel('Average sugar content in grams')\n vertical_bar_chart_axes.set_xlabel('Beverage type')\n \n #How would we create a horizontal bar chart? Use the barh() method!\n \n horizontal_bar_chart_figure = plt.figure() #The outer container \n horizontal_bar_chart_axes = horizontal_bar_chart_figure.add_axes([0.1, 0.2, 0.8, 0.9]) #The actual chart inside the figure\n #For more explanation: https://heartbeat.fritz.ai/introduction-to-matplotlib-data-visualization-in-python-d9143287ae39\n \n #Create the bar chart using the barh() method\n #The color argument lets us specify a list of colors for each of the bars\n horizontal_bar_chart_axes.barh(beverage_categories, average_sugars, color=[\"pink\", \"blue\", \"green\"])\n \n #Let's customize our chart!\n \n #Give it a title\n horizontal_bar_chart_axes.set_title('Horixontal bar chart of average sugar in grams for different types of beverages on the Starbucks menu')\n \n #Always label your axis or no one will be able to understand what the chart is showing\n horizontal_bar_chart_axes.set_ylabel('Average sugar content in grams')\n horizontal_bar_chart_axes.set_xlabel('Beverage type')\n \n \n \n \n \n\n \n \n \n \n ",
"_____no_output_____"
]
],
[
[
"## Class Dicussion: what do these bar charts show you? Were you surprised by the results?",
"_____no_output_____"
],
[
"# Line Graph\n\nLine graphs are a type of graph where each data point is connected by lines. This can help us understand how something changes in value. In this next example we will use the data we processed in the bar chart examples to create a line graph using the plot() method.",
"_____no_output_____"
]
],
[
[
" line_graph_figure = plt.figure() #The outer container \n line_graph_axes = line_graph_figure.add_axes([0.1, 0.2, 0.8, 0.9]) #The actual chart inside the figure\n #For more explanation: https://heartbeat.fritz.ai/introduction-to-matplotlib-data-visualization-in-python-d9143287ae39\n \n #Create the line graph using the plot() method\n line_graph_axes.plot(beverage_categories, average_sugars)\n \n #Let's customize our chart!\n \n #Give it a title\n line_graph_axes.set_title('Line graph of average sugar in grams for different types of beverages on the Starbucks menu')\n \n #Always label your axis or no one will be able to understand what the chart is showing\n line_graph_axes.set_ylabel('Average sugar content in grams')\n line_graph_axes.set_xlabel('Beverage type')",
"_____no_output_____"
]
],
[
[
"## Activity: make a bar chart and line graph for the average protein in grams for Cappuccinos, Macchiatos, and Smoothies",
"_____no_output_____"
]
],
[
[
"#Make you charts here!\nwith open('starbucks_drinkMenu_expanded.csv') as csvfile: #open the file\n #creates a csv reader object which stores the lines of the files in lists and lets us iterate over them\n drinksreader = csv.reader(csvfile) \n headers = next(drinksreader, None)\n\nprotein_in_cappuccinos = []\nprotein_in_macchiatos = []\nprotein_in_smoothies = []\n\nfor row in drinksreader:\n drink_category = row[drink_category_index]\n sugar_grams = row[sugars_index]\n if 'Cappuccino' in drink_category:\n protein_in_cappuccinos.append(float(protein_grams))\n if 'Macchiato' in drink_category:\n protein_in_macchiatos.append(float(protein_grams))\n if 'Smoothie' in drink_category:\n protein_in_smoothies.append(float(protein_grams))\n \nbeverage_categories = [\"Latte\", 'Tea', 'Mocha']\n#average the sugar content\naverage_protein_in_capp = np.mean(protein_in_cappuccinos)\naverage_protein_in_mach = np.mean(protein_in_macchiatos)\naverage_protein_in_smoothies = np.mean(protein_in_smoothies)\n\naverage_protein = [average_protein_in_capp, average_protein_in_mach, average_protein_in_smoothies]\n\nvertical_bar_chart_figure = plt.figure() #The outer container \nvertical_bar_chart_axes = vertical_bar_chart_figure.add_axes([0.1, 0.2, 0.8, 0.9]) #The actual chart inside the figure\n#For more explanation: https://heartbeat.fritz.ai/introduction-to-matplotlib-data-visualization-in-python-d9143287ae39\n\n#Create the bar chart using the bar() method\n#The color argument lets us specify a list of colors for each of the bars\nvertical_bar_chart_axes.bar(beverage_categories, average_protein, color=[\"pink\", \"blue\", \"green\"])\n\n#Let's customize our chart!\n\n#Give it a title\nvertical_bar_chart_axes.set_title('Vertical bar chart of average protein in grams for different types of beverages on the Starbucks menu')\n\n#Always label your axis or no one will be able to understand what the chart is showing\nvertical_bar_chart_axes.set_ylabel('Average protein content in grams')\nvertical_bar_chart_axes.set_xlabel('Beverage type')\n",
"_____no_output_____"
]
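[
"#One possible completion of the activity's line graph part - a sketch that reuses\n#beverage_categories and average_protein from the cell above\nline_graph_figure = plt.figure() #The outer container\nline_graph_axes = line_graph_figure.add_axes([0.1, 0.2, 0.8, 0.9]) #The actual chart inside the figure\n\n#Create the line graph using the plot() method\nline_graph_axes.plot(beverage_categories, average_protein)\n\nline_graph_axes.set_title('Line graph of average protein in grams for different types of beverages on the Starbucks menu')\nline_graph_axes.set_ylabel('Average protein content in grams')\nline_graph_axes.set_xlabel('Beverage type')",
"_____no_output_____"
]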
],
[
[
"## Histogram\n\nHistograms are similar to bar charts, but a histogram groups numbers into ranges. The x axis of a histogram typically shows the the value ranges and the y axis corresponds to the number of items in each range. Histograms help us better visulize and understand the distribution of the data for certain values. \n\nIf you have continuous numerical data, in order to group the data into ranges you need to split the data into intervals, as known as bins. Let's look at an example by creating histograms for the sugar content of each type starbucks beverage.\n\nYou might be wondering how do we decided how many bins to use? This is [an interesting topic](https://stats.stackexchange.com/questions/798/calculating-optimal-number-of-bins-in-a-histogram) and there are many ways to choose the bin number. For this class we don't need to worry too much about that and can just try out some different options and choose which one helps us visualize the data best. \n",
"_____no_output_____"
]
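[
"Below is a small sketch that draws the same data with a few different bin counts so you can compare the shapes side by side (it assumes the `sugar_in_lattes` list from the earlier bar chart cell is already defined):\n\n```python\nimport matplotlib.pyplot as plt\n\n#try several bin counts on the same data and compare the resulting shapes\nfig, axes = plt.subplots(1, 3, figsize=(15, 4))\nfor ax, bins in zip(axes, [5, 10, 20]):\n    ax.hist(sugar_in_lattes, bins=bins)\n    ax.set_title(f'{bins} bins')\n    ax.set_xlabel('Sugar in grams')\naxes[0].set_ylabel('Frequency')\n```",
"_____no_output_____"
]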
],
[
[
"#recall that sugar_in_lattes, sugar_in_teas, and sugar_in_mochas are all lists that store the sugar in grams data\n#We will create histograms that help us better see the sugar content distributions for each type of beverage\n\nnumber_of_bins = 10\n\nlatte_histogram_figure = plt.figure()\nlatte_histogram_axes = latte_histogram_figure.add_axes([0.1, 0.2, 0.8, 0.9])\n\nlatte_histogram_axes.hist(sugar_in_lattes, bins=number_of_bins)\n\nlatte_histogram_axes.set_title('Histogram of sugar content in Lattes')\nlatte_histogram_axes.set_ylabel('Frequency')\nlatte_histogram_axes.set_xlabel('Sugar in grams')\n\ntea_histogram_figure = plt.figure()\ntea_histogram_axes = tea_histogram_figure.add_axes([0.1, 0.2, 0.8, 0.9])\n\ntea_histogram_axes.hist(sugar_in_teas, bins=number_of_bins)\n\ntea_histogram_axes.set_title('Histogram of sugar content in Teas')\ntea_histogram_axes.set_ylabel('Frequency')\ntea_histogram_axes.set_xlabel('Sugar in grams')\n\nmocha_histogram_figure = plt.figure()\nmocha_histogram_axes = mocha_histogram_figure.add_axes([0.1, 0.2, 0.8, 0.9])\n\nmocha_histogram_axes.hist(sugar_in_mochas, bins=number_of_bins)\n\nmocha_histogram_axes.set_title('Histogram of sugar content in Mochas')\nmocha_histogram_axes.set_ylabel('Frequency')\nmocha_histogram_axes.set_xlabel('Sugar in grams')\n\n",
"_____no_output_____"
]
],
[
[
"## Class Dicussion: What observations about the data can you make with these histograms?\n\n## Activity: Create a histogram of the protein content in two different beverages of your choice!",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a173a601a52fb6ace441b0802f465d67ca7f076
| 137,985 |
ipynb
|
Jupyter Notebook
|
notebooks/13_Regression.ipynb
|
Dohako/ml_edu
|
2b780cd5eb8852c3af58f3990814267998e025e7
|
[
"CC0-1.0"
] | 6 |
2020-10-23T17:40:22.000Z
|
2021-12-22T16:01:46.000Z
|
notebooks/13_Regression.ipynb
|
Dohako/ml_edu
|
2b780cd5eb8852c3af58f3990814267998e025e7
|
[
"CC0-1.0"
] | 8 |
2020-09-17T15:01:29.000Z
|
2021-11-10T20:51:49.000Z
|
notebooks/13_Regression.ipynb
|
Dohako/ml_edu
|
2b780cd5eb8852c3af58f3990814267998e025e7
|
[
"CC0-1.0"
] | 19 |
2020-09-22T20:51:33.000Z
|
2022-01-18T17:49:32.000Z
| 137,985 | 137,985 | 0.775381 |
[
[
[
"# Регрессия - последняя подготовка перед боем!",
"_____no_output_____"
],
[
"> 🚀 В этой практике нам понадобятся: `numpy==1.21.2, pandas==1.3.3, matplotlib==3.4.3, scikit-learn==0.24.2, seaborn==0.11.2` \n\n> 🚀 Установить вы их можете с помощью команды: `!pip install numpy==1.21.2, pandas==1.3.3, matplotlib==3.4.3, scikit-learn==0.24.2, seaborn==0.11.2` \n",
"_____no_output_____"
],
[
"# Содержание <a name=\"content\"></a>\n\n * [Лирическое вступление](#Liricheskoe_vstuplenie)\n * [Первые реальные данные](#Pervye_real_nye_dannye)\n * [Анализ одной переменной (унивариантный - univariate)](#Analiz_odnoj_peremennoj_(univariantnyj_-_univariate))\n * [Анализ нескольких переменных (мультивариантный - multivariate)](#Analiz_neskol_kih_peremennyh_(mul_tivariantnyj_-_multivariate))\n * [LSTAT - MEDV](#LSTAT_-_MEDV)\n * [RM - MEDV](#RM_-_MEDV)\n * [Подготовка кода предобработки](#Podgotovka_koda_predobrabotki)\n * [fit()](#fit())\n * [transform()](#transform())\n * [Back to programming!](#Back_to_programming!)\n * [Заключение](#Zakljuchenie)\n * [Вопросы для закрепления](#Voprosy_dlja_zakreplenija)\n* [Полезные ссылки](#Poleznye_ssylki)\n",
"_____no_output_____"
]
],
[
[
"# Настройки для визуализации\r\n# Если используется темная тема - лучше текст сделать белым\r\nimport matplotlib\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport random\r\nTEXT_COLOR = 'black'\r\n\r\nmatplotlib.rcParams['figure.figsize'] = (15, 10)\r\nmatplotlib.rcParams['text.color'] = TEXT_COLOR\r\nmatplotlib.rcParams['font.size'] = 14\r\nmatplotlib.rcParams['lines.markersize'] = 15\r\nmatplotlib.rcParams['axes.labelcolor'] = TEXT_COLOR\r\nmatplotlib.rcParams['xtick.color'] = TEXT_COLOR\r\nmatplotlib.rcParams['ytick.color'] = TEXT_COLOR\r\n\r\nsns.set_style('darkgrid')\r\n\r\n# Зафиксируем состояние случайных чисел\r\nRANDOM_SEED = 42\r\nnp.random.seed(RANDOM_SEED)\r\nrandom.seed(RANDOM_SEED)",
"_____no_output_____"
]
],
[
[
"## Лирическое вступление <a name=\"intro\"></a>",
"_____no_output_____"
],
[
"И снова привет! \n\nК этому моменту мы многому научились и уже знаем немало! Тем не менее, много знаний не бывает, ведь мы приближаемся к первой боевой задаче!\n\nДа-да, скоро вам предстоит самостоятельно провести работу с набором данных! Правда, мы немного считерим, потому что в этой практике с этими данными частично познакомимся, но сделаем это частично, чтобы не забирать у вас всё веселье!\n\nРанее мы много говорили о том, как учить модель машинного обучения, как разделять данные, как анализировать модель и т.д. В работе с данными эта часть зовётся \"обучение и анализ модели\". В этой практике мы поговорим о совершенно новой части в работе с данными и научимся данные анализировать.\n\nЗачем это нужно? Ну, просто обучить модель на данных - это зовётся **baseline**. **Baseline** как правило - это самое быстрое и простое решение, которое даёт результат!\n\nВот, например, у нас есть данные о ценах на земли в городе. Задача - на основе этих данных предсказывать цены на другие участки земли. Самым простым решением будет взять сумму целевых значений (цен) и поделить на количество! Так мы получим среднее значение цены в данных и его можно постоянно предсказывать!\n\nВот таким простым способом мы получили модель, которая всё время предсказывает постоянное значение. Да, у неё есть какая-то ошибка, да, это вообще не будет похоже на зависимость в данных, но не это важно!\n\nВажно то, что имея baseline, вы будете точно знать, относительно какого решения нужно улучшать вашу модель! Уже и MAE/RMSE есть с чем сравнить - одни плюсы!\n\n> Обратите внимание, что показатель R2 как раз в этом случае будет равень 0, так как значения больше нуля - а значит, модель лучше, чем простое предсказание среднего!\n\n> 🤓 **Baseline решение** - простое и быстро достижимое решение, используется для дальнейшей оценки улучшений предсказаний при работе с данными.",
"_____no_output_____"
],
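[
"To make the idea concrete, here is a minimal sketch of such a mean baseline on a made-up toy array (hypothetical prices, not our future dataset):\n\n```python\nimport numpy as np\n\ny = np.array([100., 120., 90., 110., 130.])  # toy target values (hypothetical land prices)\n\nbaseline_prediction = y.mean()  # predict the mean every single time\nmae = np.abs(y - baseline_prediction).mean()\n\nprint(baseline_prediction)  # 110.0\nprint(mae)                  # 12.0 - the number any real model should beat\n# by definition, R2 of this constant mean prediction is exactly 0\n```",
"_____no_output_____"
],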
[
"Так вот к чему всё это? Сейчас мы пока что с вами научились строить baseline модели. \n\nА как научиться делать что-то лучше? Вот тут то и не хватает недостающей части, о которой мы с вами поговорим! И часть это зовется - **анализ данных**!\n\nНо зачем он нужен, если модель делает всё за нас? Учится на данных, регуляризацией мы убираем оверфит, на всякий проверим показатели на тестовой выборке - куда лучше?\n\nПоверьте, есть куда стремиться!\n\nВ работе с реальными данными есть простое правило - не сложность модели определяет, кто будет круче, а качество и количество данных!\n\n> ⚠️ Ещё раз, данные важнее, чем модели!\n\nТо есть, важно понимать, что происходит с моделью, оверфит это или нужна сложность модели побольше (недообучение). Но хорошее качество и количество данных могут дать намного больший прирост точности, так как шума и выбросов в них будет меньше, а зависимости более выражены.\n\nИ как же тогда нам сделать данные качественнее, если вот у нас есть датасет, и сделать его больше мы не можем?\n\nОтвет прост - как можно лучше понять данные и предобработать, а для этого - проанализировать их в первую очередь!\n\n> ⚠️⚠️ Очень важный аспект - **понимание данных**. Если вы хорошо понимаете, что за данные вы имеете и что каждый признак означает, то высока вероятность, что вы лучше их обработаете и очистите!\n\nВ таком случае, подводим **итог**! Создавать baseline модели на тех данных, что мы имеем - полезный навык. Но если мы хотим сделать нашу модель ещё круче и эффективнее, то нужно данные проанализировать и подготовить.\n\n> ⚠️ Все новые термины **обработка**, **очистка** и другие действия с данными относятся к общему понятию **подготовка данных** для модели. Baseline может строиться на неподготовленных данных и решать задачу (вероятнее всего плохо), подготовка данных нацелена на улучшение качества данных, чтобы модель, которая на них учится, выявила необходимые зависимости без влияния шума.\n\n> ⚠️ Для реализации хорошей **подготовки данных** необходимо провести **анализ данных**, чтобы данные лучше понять.",
"_____no_output_____"
],
[
"Это всё слова, но пора к делу! \n\nВы ещё увидите, почему анализ данных иногда бывает намного интереснее простого обучения модельки!",
"_____no_output_____"
],
[
"## Первые реальные данные <a name=\"real_data\"></a>",
"_____no_output_____"
],
[
"Настройтесь, сейчас мы с вами загрузим наши первые реальные данные и начнём с ними работать. Чувствуете это предвкушение?\r\n\r\n<p align=\"center\"><img src=\"https://vk.com/sticker/1-2920-512-9\" width=300/></p>\r\n\r\nСтоп, а где эти данные взять?\r\n\r\nНе переживайте, сегодня не вы одни занимаете наукой о данных, поэтому есть очень много ресурсов с разными данными, а мы постучимся на [Kaggle](https://www.kaggle.com/)! Для начала вам нужно там зарегистрироваться, если вы этого ещё не сделали! \r\n\r\nДальше, нам нужно достать данные, которые нам сейчас нужны - мы воспользуемся [этим датасетом](https://www.kaggle.com/fedesoriano/the-boston-houseprice-data). После регистрации у вас будет возможность скачать CSV файл `boston.csv`.\r\n\r\nПосле этого всё зависит от того, где вы работаете. Если вы проходите практики на Google Colab, то вам нужно загрузить файл с данными на сам Colab (для этого есть меню слева).\r\n\r\nЕсли вы работаете локально, на своей машине (компьютере), то достаточно положить рядом с ноутбуком!\r\n\r\n> ✨ Если вы всё выполнили верно, то код дальше будет выполняться без проблем. Если нет - обратитесь к преподавателю за помощью!",
"_____no_output_____"
]
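[
"A quick way to double-check that the file ended up in the right place (just a sketch using the standard library):\n\n```python\nimport os\n\n# the loading code below expects boston.csv next to the notebook\n# (or in the Colab working directory)\nprint(os.path.exists('boston.csv'))  # should print True\n```",
"_____no_output_____"
]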
],
[
[
"df_src = pd.read_csv('boston.csv')",
"_____no_output_____"
]
],
[
[
"Когда данные успешно загружены, то важно первым делом посмотреть на размер данных и на сами данные!",
"_____no_output_____"
]
],
[
[
"df_src.shape",
"_____no_output_____"
],
[
"df_src.head(10)",
"_____no_output_____"
],
[
"df_src.info()",
"_____no_output_____"
],
[
"# And of course, immediately check the overall missing values in the data\r\ndf_src.isnull().sum()",
"_____no_output_____"
]
],
[
[
"Смотрите, пара действий, а мы уже видим некоторую информацию о данных. \n\n* Во-первых, у нас есть 14 переменных, из которых как минимум одну мы планируем предсказывать. \n\n* Во-вторых, во всём наборе данных есть всего 506 записей (примеров). Это немного, но хватит, чтобы много обсудить!\n\nНо здесь есть важная особенность, каждая колонка имеет название, но все они в виде аббревиатур! Это плохо, так как это затруднит разбор данных и может ухудшить понимание. Небольшой поиск по странице датасета и в интернете даёт как минимум два источника, в которых есть следующая информация о данных:\n- https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html#:~:text=The%20Boston%20Housing%20Dataset,the%20area%20of%20Boston%20Mass\n- https://scikit-learn.org/stable/datasets/toy_dataset.html#boston-house-prices-dataset\n\nИнформация о колонках:\n- CRIM - per capita crime rate by town\n- ZN - proportion of residential land zoned for lots over 25,000 sq.ft.\n- INDUS - proportion of non-retail business acres per town\n- CHAS - Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)\n- NOX - nitric oxides concentration (parts per 10 million)\n- RM - average number of rooms per dwelling\n- AGE - proportion of owner-occupied units built prior to 1940\n- DIS - weighted distances to five Boston employment centres\n- RAD - index of accessibility to radial highways\n- TAX - full-value property-tax rate per $10,000\n- PTRATIO - pupil-teacher ratio by town\n- B - 1000(Bk - 0.63)^2 where Bk is the proportion of black people by town\n- LSTAT - % lower status of the population\n- MEDV - Median value of owner-occupied homes in $1000’s",
"_____no_output_____"
],
[
"Отлично, какая-то информация есть и её можно перевести с английского, что даёт нам:\n\n- CRIM - уровень преступности на душу населения по городам\n- ZN - доля жилой земли, зонированной для участков площадью более 25 000 кв. футов.\n- INDUS - доля акров нетоварного бизнеса в городе\n- CHAS - переменная-флаг приближенности к реке (= 1 если рядом с рекой; 0 в ином случае)\n- NOX - концентрация оксидов азота (частей на 10 миллионов)\n- RM - среднее количество комнат в одном жилом помещении\n- AGE - доля квартир, занятых владельцами, построенных до 1940 года\n- DIS - взвешенные расстояния до пяти бостонских центров занятости\n- RAD - индекс доступности радиальных магистралей\n- TAX - недвижимость с полной стоимостью-ставка налога за 10 000 долларов США\n- PTRATIO - соотношение числа учащихся и учителей по городам\n- B - 1000(Bk - 0.63)^2, где Bk - доля чернокожего населения по городам\n- LSTAT - процент бедности населения\n- MEDV - средняя стоимость домов, занятых владельцами, в 1000 долларов США\n\nШикарно, это пригодится нам в ходе анализа!\n\nУже сейчас мы можем сформировать постановку задачи предсказания - нам нужно предсказывать **цену дома (MEDV)** по 13-ти имеющимся признакам. Не факт, что мы всеми признаками воспользуемся, но всё-таки это то, что мы сейчас имеем.\n\n> Не бойтесь, работа с 13 переменными, когда мы вот только работали всего с одной - не так страшна, как кажется. Более того, когда мы строили полиномиальную регрессию 15-го порядка, то там у нас было аж 15 признаков!",
"_____no_output_____"
],
[
"Так с чего же начинается анализ данных? Самое простое - с анализа каждой переменной!\n\nЧто мы хотим увидеть? В анализе одной переменной важно понять:\n\n- что представляет из себя переменная\n- есть ли у неё пропуски и как лучше их заполнитиь\n- есть ли у переменной явные выбросы\n- какое у переменной распределение и есть ли смещение\n- и другие интересности, которые мы заметим =)\n\nВ этой практике мы пройдёмся по наиболее важным переменным, а вот в реальной задаче вам предстоит проанализировать каждую переменную! Так можно составить более полную картину данных!\n\n> ⚠️ Этот список не исчерпывающий, но он сообщает, что любые странности и закономерности в данных важно выявить и проанализировать на предмет того, полезный ли эффект наблюдается или его лучше убрать, чтобы моделе было проще искать базовые зависимости в данных.",
"_____no_output_____"
],
[
"## Анализ одной переменной (унивариантный - univariate) <a name=\"uni\"></a>",
"_____no_output_____"
],
[
"Начнем с анализа под названием унивариантный. Он так называется, потому что мы анализируем каждую переменную по отдельности. Обычно, самым простым вариантом является построение распределения переменной, чтобы понять характер распределения.\n\nЗдесь для примера мы возьмем переменную RM (среднее количество комнат в одном жилом помещении).",
"_____no_output_____"
]
],
[
[
"sns.displot(df_src['RM'], kde=True, height=7, aspect=1.5)",
"_____no_output_____"
]
],
[
[
"Что мы видим на графике? \n\nРаспределение этой переменной близко к нормальному (Gauss-like - близко к Гауссовому). \n\nПределы значений в диапазоне около [3; 9] комнат. \n\nЗдесь важный акцент мы сделаем на \"нормальности\" распределения, так как бывают разные вариации нормальности. При анализе другой переменной мы это увидим.\n\nТогда по этой переменной мы можем заключить следующее: \n\n* по таблице пропусков переменная пропусков не имеет\n* распределение близкое к нормальному\n* значения лежат в пределах, ожидаемых для описания этой переменной - количество комнат. \n\nНе сложно, правда?",
"_____no_output_____"
],
[
"Другую переменную мы возьмём явно с интересным эффектом:",
"_____no_output_____"
]
],
[
[
"sns.displot(df_src['DIS'], kde=True, height=7, aspect=1.5)",
"_____no_output_____"
]
],
[
[
"Вот эту переменную уже сложнее назвать нормально распределённой. Она имеет явное **смещение влево**. Ещё это назвают **правый хвост**, так как правая часть похожа на хвост.\n\nЧто делать с такими переменными? \n\nНу, есть разные способы. Тут мы уже с вами говорим про методы модификации данных, а значит начинаем строить план обработки данных!\n\nМожно выделить два наиболее явных способа исправления распределения:\n\n- исправление с помощью логарифма (он исправляет левое смещение)\n- воспользоваться автоматизированными способами коррекции, например, [PowerTransformer](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PowerTransformer.html)\n\nПервый способ мы попробуем сейчас, а вот со вторым вы можете разобраться самостоятельно, когда в следующей практике ринетесь в бой!",
"_____no_output_____"
]
],
[
[
"dis_log_col = np.log(df_src['DIS'])\r\n\r\nsns.displot(dis_log_col, kde=True, height=7, aspect=1.5)",
"_____no_output_____"
]
],
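[
[
"As a side note, here is a minimal sketch of the second, automated approach mentioned above. It is only an illustration (assuming `PowerTransformer` from `sklearn.preprocessing` is available), not a required part of the exercise:",
"_____no_output_____"
]
],
[
[
"# Illustration only: automated distribution correction with PowerTransformer\r\nfrom sklearn.preprocessing import PowerTransformer\r\n\r\npt = PowerTransformer(method='yeo-johnson')  # the default method, works with zero and negative values too\r\ndis_pt = pt.fit_transform(df_src[['DIS']])   # expects 2D input, returns an ndarray\r\n\r\nsns.displot(dis_pt.ravel(), kde=True, height=7, aspect=1.5)",
"_____no_output_____"
]
],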
[
[
"Как видите, центр распределения сместился ближе к середине и само распределение стало больше похоже на нормальное, результат - успех!\n\n> 🔥 Не только в DS, но и в других областях, где вы модифицируете данные - всегда проверяйте результат и сравнивайте с ожиданиями! Это важно, так как без проверки промежуточного результата может появиться проблема, которая доставит много головной боли потом!\n\n> ⚠️ Исправление распределения очень важно для линейных моделей. Мы сейчас не заостряем внимание на этом, но в следующей самостоятельной практике обязательно сравните результаты с исправлением и без него!\n\nВ результате, вывод по переменной:\n\n* пропусков не имеет\n* *распределение смещено, поэтому требуется исправление*\n\nПоследний вывод важно записать в список дел, так как по результатам мы будм делать всю обработку данных единым образом.",
"_____no_output_____"
],
[
"Давайте для примера возьмём ещё одну переменную, чтобы проанализировать нестандартное распределение:",
"_____no_output_____"
]
],
[
[
"sns.displot(df_src['CHAS'], kde=True, height=7, aspect=1.5)",
"_____no_output_____"
]
],
[
[
"Можно было бы сказать, что распределение смещено влево, но обратите внимание - в данных всего два значения: 0 и 1. Давайте это проверим:",
"_____no_output_____"
]
],
[
[
"df_src['CHAS'].unique()",
"_____no_output_____"
]
],
[
[
"Действительно, что же нам в таком случае делать? \n\nДа ничего, это распределение бимодальное, поэтому мы не будем пытаться его исправить. \n\nВывод по переменной: \n\n* пропусков нет\n* распределение бимодальное\n\nДелать с этой переменной пока ничего не будем!\n\nОстальные переменные мы оставим за кадром, чтобы вам тоже было, с чем поработать!\n\nПо результату анализа одной переменной делается вывод об основных особенностях каждой переменной. Мы с вами ещё научимся другим подходам анализа и многому интересному, но пока достаточно понимать следующие вещи:\n\n- имеет ли переменная пропуски (как их заполнять узнаем потом)?\n- понимаем ли мы суть переменной, сходится ли с описанием и логичные ли значения?\n- нужно ли корректировать распределение?",
"_____no_output_____"
],
[
"## Анализ нескольких переменных (мультивариантный - multivariate) <a name=\"multi\"></a>",
"_____no_output_____"
],
[
"Вот мы переходим к более вкусному анализу - зависимости между переменными!\n\nИ начнем мы с определения **корреляций**! \n\nМы уже много говорили о том, что в данных есть зависимости, но наблюдали мы их только на графиках. Как и во всех методах - хорошо бы иметь метод, который численно подтвердит наличие зависимости в данных! Есть он у меня для вас!\n\nДля примера мы возьмём пару переменных - полный анализ (все переменные) вы проведёте самостоятельно!",
"_____no_output_____"
]
],
[
[
"# Для примера выберем следующие признаки\r\n# Мы специально включили целевую переменную, чтобы показать, как проводить вместе в ней анализ\r\nfeatures = ['CRIM', 'LSTAT', 'RM', 'MEDV']\r\n\r\ncorrelation_mtrx = df_src[features].corr()\r\ncorrelation_mtrx",
"_____no_output_____"
]
],
[
[
"Таблица - это хорошо, но, как обычно, график лучше воспринимается =)",
"_____no_output_____"
]
],
[
[
"sns.heatmap(correlation_mtrx, annot=True, fmt='.2f')",
"_____no_output_____"
]
],
[
[
"Корреляция - это способ численно показать наличие зависимости между двумя переменными. \r\n\r\nДавайте попробуем проанализировать то, что мы видим здесь. \r\n\r\nС целевой переменной (MEDV) имеют близкую к высокой корреляция (считается, что высокая корреляция +/- 0.8-0.85 и выше по модулю) переменные RM и LSTAT. Это **может** означать, что эти переменные сильнее влияют на формирование цены, чем признак CRIM. \r\n\r\nПочему **может**? Да потому, что коэффициент корреляции - это лишь число, которое может не полностью отражать картину, поэтому такие выводы должны лишь заставлять задуматься, но ни в коем случае не делать конечные выводы лишь на основе корреляции!\r\n\r\n> 🤓 Корреляция всегда оценивается по модулю. Она может быть как высокой положительной, так и высокой отрицательной. Это для случая коэффициента Пирсона. Есть и другие коэффициенты, которые имеют диапазон [0; 1], но это уже совсем другая история =) \r\n\r\nПоглядите, что такое корреляция на более общем представлении разных ситуаций:\r\n\r\n<p align=\"center\"><img src=\"https://raw.githubusercontent.com/kail4ek/ml_edu/master/assets/correlations.png\" width=600/></p>\r\n\r\n> ⚠️ Высокая корреляция переменных между собой является эффектом **мультиколлинеарности признаков**. Это плохой эффект для модели, так как в случае сильной взаимосвязи переменных между собой модель может запутаться в расставлении весов независимым переменным. Они ведь не просто так зовутся независимыми! Одна из практик - в данных для предсказания оставлять одну из пары зависимых между собой переменных, а другую убирать из данных.\r\n\r\nПо умолчанию, метод `.corr()` вычисляет коэффициент корреляции Пирсона. Этот тип коэффициента корреляции хорошо оценивает линейные зависимости. Попробуйте разобраться в документации, как оценить корреляцию по Спирману (Spearman) и выведите матрицу. Оцените, как изменились коэффициенты. Как изменился показатель на LSTAT-MEDV? Почему?",
"_____no_output_____"
]
],
[
[
"# TODO - выведите матрицу корреляции по Спирману и проанализируйте ее",
"_____no_output_____"
]
],
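[
[
"To build intuition without spoiling the exercise above, here is a minimal sketch on synthetic data showing why Pearson and Spearman can disagree: the relationship below is perfectly monotonic but far from linear (the toy variables are purely illustrative):",
"_____no_output_____"
]
],
[
[
"# Illustration on synthetic data: Pearson vs Spearman for a monotonic nonlinear relation\r\ntoy = pd.DataFrame({'x': np.linspace(0, 5, 100)})\r\ntoy['y'] = np.exp(toy['x'])  # strictly increasing, but strongly nonlinear\r\n\r\nprint('Pearson: ', toy.corr(method='pearson').loc['x', 'y'])\r\nprint('Spearman:', toy.corr(method='spearman').loc['x', 'y'])  # rank-based, equals 1.0 here",
"_____no_output_____"
]
],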
[
[
"Отлично, вот так незатейливо мы научились анализировать зависимости в данных без просмотра данных. \n\nНа основе этого мы можем построить первоначальные выводы, но не посмотреть на данные (визуализировать их) - это сродне очень серьезной ошибке. Всегда важно по максимуму визуализировать данные и просматривать их. Так можно тщательнее провести анализ и узнать больше полезной информации о данных!\n\nПоэтому, давайте воспользуемся хитрым графиком для отображения зависимостей между данными:",
"_____no_output_____"
]
],
[
[
"sns.pairplot(df_src[features], diag_kind='auto', height=6)",
"_____no_output_____"
]
],
[
[
"Что мы видим на графике? \n\nПо главной диагонали отображается распределение самой переменной, так как на 2d графике показывать точки переменной самой с собой - это будет просто линия. В отличных от диагональных ячейках располагаются графики распределения в плоскости одной переменной против другой.\n\nЗдесь сразу можно сделать два вывода:\n- LSTAT-MEDV имееть нелинейную зависимость (видите, как замедляется уменьшение MEDV при увеличении LSTAT?)\n- На графике RM-MEDV видны точки, который очень \"странно\" лежат. Явно видно, что с увеличением RM MEDV растёт, но есть несколько точек, которые лежат как бы на прямой, вне зависимости от RM. Их нужно проанализировать!\n\nДавайте перейдем к конкретному разбору!",
"_____no_output_____"
],
[
"### LSTAT - MEDV <a name=\"lstat_medv\"></a>\n\nПопробуем вывести точечный график переменных:",
"_____no_output_____"
]
],
[
[
"sns.scatterplot(x='LSTAT', y='MEDV', data=df_src)",
"_____no_output_____"
]
],
[
[
"Здесь явно выделяется нелинейная зависимость, поэтому мы в ходе предобработки сформируем новый признак - вторая степень от LSTAT. Это обусловлено этой явной нелинейностью. Запишем в планы!",
"_____no_output_____"
],
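[
"The step itself is a one-liner; a minimal sketch (the feature name matches the plan, and the real feature will be built later inside the preprocessing class):\n\n```python\n# Sketch: a second-degree polynomial feature for LSTAT\ndf_src['LSTAT_poly_2'] = df_src['LSTAT'] ** 2\n```",
"_____no_output_____"
],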
[
"### RM - MEDV <a name=\"rm_medv\"></a>\n\nАналогично более подробно смотрим точечный график переменных:",
"_____no_output_____"
]
],
[
[
"sns.scatterplot(x='RM', y='MEDV', data=df_src)",
"_____no_output_____"
]
],
[
[
"Смотрите, у на есть два типа потенциальных **выбросов**. \n\n* Одни выбросы - лежат на прямой на уровне около MEDV ~= 50. \n* Другие - выбиваются от общей зависимости в диапазонах: RM < 4 и (RM > 8 & MEDV < 30).\n\nПри обработке выбросов важно смотреть, что из себя представляют данные, поэтому выведем примеры и глянем на них:",
"_____no_output_____"
]
],
[
[
"outliers_1 = df_src[df_src['MEDV'] >= 50]\r\noutliers_2 = df_src[(df_src['RM'] < 4) | ((df_src['RM'] > 8) & (df_src['MEDV'] < 30))]",
"_____no_output_____"
],
[
"outliers_1",
"_____no_output_____"
],
[
"outliers_2",
"_____no_output_____"
]
],
[
[
"Давайте посмотрим, выбросы по уровню цены = 50, которые очень нестандартно лежат на плоскости. \n\nПо данным явно не видно очевидной зависимости, поэтому трудно сразу сказать, что это явные выбросы. Как правило, выбросы имеют сильные искажения в данных, что видно и по другим переменным.\n\nЕсли всмотреться, то выбиваются именно точки, которые имеют RM < 7, а у них значение TAX = 666. Если построить распределение переменной TAX (вы это проделаете сами), то можно заметить, что значение 666 отстоит от основных данных, но таких записей с этим значением - аж 130, что сложно назвать выбросом.\n\nТем не менее, это повторяется и в выбросах, которые отстают от основной группы точек, что наводит на мысль, что это всё-таки их обощает.\n\nОдно из предположений, которое можно сделать - **цензурирование данных**. Это подход, при котором в данных суммы и информация, которую важно закрыть, заменяется каким-то константным значением.\n\nПоэтому, при обработке, мы удалим эти данные, так как цензурирование искажает зависимости и это может сказаться на результатах работы.",
"_____no_output_____"
],
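[
"You can verify the TAX observation with a quick check (a small sketch, feel free to run it yourself):\n\n```python\n# How many records share the suspicious TAX value?\n(df_src['TAX'] == 666).sum()\n```",
"_____no_output_____"
],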
[
"Давайте попробуем подчистить данные и посмотреть, как изменятся распределения точек на графиках:\n\n> ⚠️ Очистка данных - процесс очень выборочный, поэтому важно ещё раз всё перепроверять, чтобы не совершить ошибки, так как в результате данных становится меньше.\n\n> ⚠️ В ходе очистки удаляются записи данных - строки.",
"_____no_output_____"
]
],
[
[
"outliers_mask_1 = df_src['MEDV'] == 50\noutliers_mask_2 = df_src['RM'] < 4\noutliers_mask_3 = (df_src['RM'] > 8) & (df_src['MEDV'] < 30)\n\noutliers_mask = outliers_mask_1 | outliers_mask_2 | outliers_mask_3\n\ndf_cleaned = df_src.loc[~outliers_mask]\n\nsns.pairplot(df_cleaned[features], diag_kind='auto', height=6)",
"_____no_output_____"
]
],
[
[
"Как видите, график стал почище, а зависимость RM-MEDV стала более выраженной. Можем даже по-новой проверить корреляцию:\n\n> ⚠️ Если вы обратили внимание, что на графике CRIM-MEDV много точек лежит на значении CRIM=0 - молодцы! Внимательность - это отлично! В данном случае мы не рассматриваем их в качестве кандидатов на выбросы, так как их мало и нам ещё помогает **смысл переменной**: много домов с низким криминальным уровнем - это нормально.",
"_____no_output_____"
]
],
[
[
"sns.heatmap(df_cleaned[features].corr(), annot=True, fmt='.2f')",
"_____no_output_____"
]
],
[
[
"RM-MEDV ранее был 0.7, а теперь стал 0.73 и всё благодаря чистке данных!",
"_____no_output_____"
],
[
"Как видите, как анализ одной переменной, так и анализ нескольких переменных не отличается чем-то сверх-научным. Как правило, данные достаточно посмотреть, пропустить через пару вычислений (как, например, корреляция) и уже можно составлять определённую картину.\n\nТакже, в подготовке и очистке данных помогает понимание данных. Так, например, если бы в наших данных количество комнат (RM) имело бы значения -1, то мы понимали бы, что такого быть не может и тоже рассматривали бы это как выбросы.\n\nВ результате, мы научились базовому анализу нескольких переменных (multivariate), рассмотрели, как можно детектировать выбросы и как оценивать зависимости численно - отличный результат, мы молодцы!",
"_____no_output_____"
],
[
"## Подготовка кода предобработки <a name=\"preproc\"></a>",
"_____no_output_____"
],
[
"Помимо того, что на каждом из этапов анализа проверяется своя подготовка, очистка и другая обработка данных - важно в конечном итоге сформировать единый код для предобработки данных, чтобы пользоваться было им удобно и он был более-менее универсален (была возможность применить его на новых данных).\n\nДавайте выделим два этапа:\n\n* очистка данных\n* предобработка\n\nОчистка делается для процесса обучения, чтобы модели предоставить более чистые данные без выбросов и лишнего шума.\nПредобработка делатся как для обучения, так и для обработки новых данных.\n\n> ⚠️ Помним, что конечная цель модели машинного обучения не просто обучиться и показать высокую метрику, а давать предсказания на новых данных и делать это хорошо.\n\nТак вот важно предобработку нормально оформить, чтобы потом не пришлось корячиться с кодом, когда надо будет его разворачивать в облаке =)\n\nДля этого нам поможет парадигма классов в Python!\n\nНо перед этим, мы быстренько оформим код очитки данных:",
"_____no_output_____"
]
],
[
[
"# TODO - напишите функцию clean_dataset(), который принимает DataFrame на вход и выдает его очищенным\r\n# NOTE - в функции надо выбрать выбросы той методикой, которую мы уже выработали и вернуть почищенный датасет",
"_____no_output_____"
],
[
"# TEST\r\n\r\n_test_df = pd.DataFrame({\r\n 'MEDV': [10, 20, 50, 50, 30, 10],\r\n 'RM': [5, 6, 7, 7, 3, 8],\r\n})\r\n_test_result = clean_dataset(_test_df)\r\n\r\npd.testing.assert_index_equal(pd.Index([0, 1, 5]), _test_result.index)\r\n\r\nprint(\"Well done!\")",
"_____no_output_____"
]
],
[
[
"Отлично, функция очистки написана и её мы применим только для нашего датасета, поэтому её универсальность не так важна!\n\nА теперь приступим к проработке класса для нашей собственной предобработки!\n\nНачнём с архитектуры, вот так будет выглядеть наш класс:",
"_____no_output_____"
]
],
[
[
"class DataPreprocessing:\r\n def __init__(self):\r\n pass\r\n\r\n def fit(self, df):\r\n pass\r\n\r\n def transform(self, df):\r\n return df",
"_____no_output_____"
]
],
[
[
"Вот и весь класс, ничего страшного =)\n\nТолько, его методы (а-ля функции) ещё не реализованы, поэтому рано говорить о размерах кода =)\n\nДавайте обсудим, что мы уже написали и зачем нужны эти методы:",
"_____no_output_____"
],
[
"### fit() <a name=\"fit\"></a>\n\n`.fit()` - это метод, который занимается сбором статистики с данных, чтобы их потом обработать. Собранную статистику мы будет хранить в атрибутах класса.\n\nЧто такое *сбор статистики*? \n\nВсё просто. Давайте вспомним, как в прошлый раз масштабировали данные с помощью MinMaxScale. По сути, нам нужно вычислить минимум и максимум в данных и затем применить формулу с этими константами.\n\nА теперь вспомним, что нам надо масштабировать на обучающей выборке и выборке для теста.\n\nДавайте рассмотрим плохой вариант (*неправильный*): мы вычисляем мин-макс на обучающей выборке, допустим, получили (минимум = 10 и максимум = 100). Преобразовали обучающую выборку и всё ок.\n\nТеперь, берём тестовую и вычисляем то же самое (получаем, минимум = 20 и максимум = 105). Преобразовали тестовую выборку.\n\nА что дальше?\n\nНу, модель обучится, ведь обучение - простая математика и предсказания будут как-то работать, но будет **концептуальная** ошибка!\n\nИменно в том, что модель учится на данных, ей приходит значение признака 1.0, а в исходных данных 1.0 ~ 100 (ведь максимум на обучающей = 100). Потом мы передаём тестовую и там тоже есть значение 1.0, но только на тестовой это означает 105. \n\nК чему это приводит?\n\nМодель ничего не заметит, сделает предсказание, а в нём будет ошибка! Ведь мы, хоть и не специально, начинаем модель путать, подавая данные, которые означают совсем другое, нежели на чём модель училась.\n\nЧто же мы можем сделать?\n\nА что если, мы на обучающей выборке найдем минимум и максимум, запомним их и применим как к обучающей, так и тестовой выборке! Тогда, во всех данных (и даже в новых), 1.0 будет означать 100 и мы никого путать не будем!\n\n> 🤓 Да, в нашем случае на тестовой будут значения больше 1.0, но это не страшно! Главное для масштабирования - привести к одинаковым порядкам, а для правильной обработки - собрать статистику на обучающей выборке (train) и дальше применять её для трансформации как на обучающей, так и на тестовой выборке!\n\nТак вот мы и подошли к главному правилу в организации `fit()-transform()`: `fit()` всегда применяется только на train выборке! Эта функция собирает статистику, а её надо собирать только на обучающей выборке! На полной (train+test), не тестовой (test), а только на обучающей (train)!",
"_____no_output_____"
],
[
"### transform() <a name=\"transform\"></a>\n\nНу тут уже все проще. Все этапы обработки данных, что требуют сбор статистики - собирают в `fit()`, ну а дальше просто применяем всю обработку в `transform()`! Все просто! =)",
"_____no_output_____"
],
[
"## Back to programming! <a name=\"prog\"></a>\n\nОтлично, мы разобрались, зачем нужен каждый метод! Давайте попробуем написать свой класс для предобработки!\n\nРеализуем следующую предобработку:\n- Выравнивание распределения для признака `DIS` с помощью логарифма\n - Нужно создать новый признак `DIS_log`, а старый удалить\n- Генерация полиномиального признака для `LSTAT` с названием `LSTAT_poly_2`\n- MinMaxScale - посмотрите на класс [MinMaxScaler](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html)\n - Сделайте масштабирование всех признаков\n\nПо сути, это небольшой набор того, как мы запланировали предобработать данные по результатам анализа!\n\n> 🔥 Объекты трансформеров из `sklearn` работают по аналогичному принципу, как мы с вами обсудили. Поэтому, при работе с ними можно сами объекты трансформеров создавать прямо в конструкторе нашего класса. `fit()` трансформеров вызывать в нашем методе `fit()`, ну и `transform()`, соответственно.",
"_____no_output_____"
]
],
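[
[
"Before the TODO, here is a minimal sketch of the fit-on-train / transform-everything pattern on toy data. It only demonstrates the principle discussed above; all names here are illustrative and it is not the solution of the task:",
"_____no_output_____"
]
],
[
[
"# Sketch: statistics are collected on train only, then applied to both samples\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\ntrain_demo = pd.DataFrame({'feat': [10.0, 40.0, 100.0]})\r\ntest_demo = pd.DataFrame({'feat': [20.0, 105.0]})\r\n\r\nscaler_demo = MinMaxScaler()\r\nscaler_demo.fit(train_demo)  # min=10 and max=100 come from train only\r\n\r\nprint(scaler_demo.transform(train_demo).ravel())  # approx. [0. 0.333 1.]\r\nprint(scaler_demo.transform(test_demo).ravel())   # approx. [0.111 1.056] - values above 1.0 are fine",
"_____no_output_____"
]
],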
[
[
"# TODO - реализуйте описанную предобработку\r\n\r\nclass DataPreprocessing:\r\n def __init__(self):\r\n pass\r\n\r\n def fit(self, df):\r\n # Скопируем исходные данные, чтобы не изменять их\r\n df_copy = df.copy()\r\n\r\n # Здесь обратите внимание, что нужно сгенерировать полином и выровнять логарифмом, чтобы MinMaxScaler обучился и на них тоже\r\n pass\r\n\r\n def transform(self, df):\r\n # Возвращать transform() должен тоже DataFrame!\r\n return df",
"_____no_output_____"
],
[
"# TEST\r\n\r\n_test_df = pd.DataFrame({'DIS': [2.3, 1.9, 0.4, 2.2], 'LSTAT': [0.1, 0.2, 0.3, 0.4], 'MORE_FEAT': [1, 2, 3, 4]}, index=[4, 6, 10, 12])\r\n\r\npreproc = DataPreprocessing()\r\n\r\npreproc.fit(_test_df)\r\n\r\n_test_result = preproc.transform(_test_df)\r\n\r\n_test_expected = pd.DataFrame({\r\n 'DIS_log': [1.0, 0.8907756387942631, 0.0, 0.9745873735075969], \r\n 'LSTAT': [0.0, 0.333, 0.666, 1.0], \r\n 'LSTAT_poly_2': [0.0, 0.2, 0.5333, 1.], \r\n 'MORE_FEAT': [0.0, 0.333, 0.666, 1.0]\r\n}, index=_test_df.index)\r\n\r\npd.testing.assert_frame_equal(_test_result, _test_expected, check_like=True, atol=1e-3)\r\n\r\nprint(\"Well done!\")",
"_____no_output_____"
]
],
[
[
"Если вы прошли тест - значит вы большие молодцы!!\n\nВ результате такой класс можно спокойно применять для подготовки данных для обучения модели и более того, для подготовки данных при поступлении новых!\n\nА это значит, мы ещё не обучили, но уже готовы предсказывать и показывать, как круто наша модель работает! Стремимся к высоким целям! ",
"_____no_output_____"
],
[
"## Заключение <a name=\"conclusion\"></a>",
"_____no_output_____"
],
[
"В результате прохождения этой практики вы узнали очень важный факт (а может и несколько). \n\n**Анализ данных нужен и важен!**\n\nКонечно, мы только увидели пару приёмов, но в следующей практике, вы попробуете их в бою и увидите, что это действительно работает!",
"_____no_output_____"
],
[
"## Вопросы для закрепления <a name=\"qa\"></a>\n\nА теперь пара вопросов, чтобы закрепить материал!\n\n1. Зачем нужны классы в DS?\n2. Чем полезна предобработка данных? \n3. Опасно ли удалять какие-то данные из исходных? Когда можно такое делать? \n4. На какой выборке применяется метод-fit?\n5. На какой выборке применяется метод-transform?",
"_____no_output_____"
],
[
"# Полезные ссылки <a name='links'></a>\r\n* [Linear Discriminant Analysis (LDA) от StatQuest](https://www.youtube.com/watch?v=azXCzI57Yfc)\n* [Basic Statistics for Data Science на Medium](https://medium.com/mlearning-ai/important-statistical-concepts-for-data-scientists-54e09106b75e)\n* [Quartiles for Beginners in DS на Medium](https://medium.com/@vinitasilaparasetty/quartiles-for-beginners-in-data-science-2ca5a640b07b)\n* [Understanding Value of Correlations in DS на Medium](https://medium.com/fintechexplained/did-you-know-the-importance-of-finding-correlations-in-data-science-1fa3943debc2)\n* [Correlation](https://luminousmen.com/post/data-science-correlation)\n* [Fundamentals of Statistics](https://towardsdatascience.com/fundamentals-of-statistics-for-data-scientists-and-data-analysts-69d93a05aae7)\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a17408a281b6f8fcc9e54043306f4343a6c7edd
| 816,772 |
ipynb
|
Jupyter Notebook
|
content/lessons/13/Watch-Me-Code/WMC4-More-Matplotlib.ipynb
|
MahopacHS/spring-2020-oubinam0717
|
5b35579e658e34cbb07c3477a9fce13ce01830af
|
[
"MIT"
] | 14 |
2017-02-23T21:00:46.000Z
|
2021-03-19T09:29:40.000Z
|
content/lessons/13/Watch-Me-Code/WMC4-More-Matplotlib.ipynb
|
MahopacHS/spring-2020-oubinam0717
|
5b35579e658e34cbb07c3477a9fce13ce01830af
|
[
"MIT"
] | null | null | null |
content/lessons/13/Watch-Me-Code/WMC4-More-Matplotlib.ipynb
|
MahopacHS/spring-2020-oubinam0717
|
5b35579e658e34cbb07c3477a9fce13ce01830af
|
[
"MIT"
] | 38 |
2017-02-03T13:49:19.000Z
|
2021-08-15T16:47:56.000Z
| 866.1421 | 245,722 | 0.936251 |
[
[
[
"# Watch Me Code 4: More Matplotlib\n\n- Data Analysis of Syracuse Weather with Plotting\n",
"_____no_output_____"
]
],
[
[
"%matplotlib inline \nimport matplotlib\nmatplotlib.rcParams['figure.figsize'] = (20.0, 10.0) # larger figure size\nimport pandas as pd\nimport numpy as np\nfrom pandas import DataFrame, Series",
"_____no_output_____"
],
[
"weather = pd.read_csv(\"https://raw.githubusercontent.com/mafudge/datasets/master/weather/syracuse-ny.csv\")",
"_____no_output_____"
],
[
"weather.head()",
"_____no_output_____"
],
[
"weather['Events'].unique()",
"_____no_output_____"
]
],
[
[
"Let's get just the thunderstorms!",
"_____no_output_____"
]
],
[
[
"thunder = weather[ weather['Events'].str.find('Thunderstorm') >=0 ]\nthunder.head()",
"_____no_output_____"
]
],
[
[
"The percentage of days it thunders in Syracuse, historically",
"_____no_output_____"
]
],
[
[
"thunder.EST.count() / weather.EST.count()",
"_____no_output_____"
],
[
"weather.columns",
"_____no_output_____"
]
],
[
[
"What is the relationship between Temperature and Dewpoint?",
"_____no_output_____"
]
],
[
[
"weather.plot.scatter( x = 'Mean TemperatureF', y = 'MeanDew PointF')",
"_____no_output_____"
]
],
[
[
"INSIGHT: Positive correlation between tem and dewpoint. Every meteroloogist knows this. ;-)\n\nWhat is the relationship between cloud cover and visibility?",
"_____no_output_____"
]
],
[
[
"weather.plot.scatter( x = 'CloudCover', y = 'Mean VisibilityMiles')",
"_____no_output_____"
]
],
[
[
"As one would expect the less cloud cover the greater visibility. \n\nHow about temperature and wind speed?",
"_____no_output_____"
]
],
[
[
"weather.plot.scatter( x = 'Mean TemperatureF', y = 'Mean Wind SpeedMPH')",
"_____no_output_____"
]
],
[
[
"not much of an insight there, but...\n\nwhen you look at the relationship on days where it thunders:",
"_____no_output_____"
]
],
[
[
"thunder.plot.scatter( x = 'Mean TemperatureF', y = 'Mean Wind SpeedMPH')",
"_____no_output_____"
]
],
[
[
"We see that it doesn't really thunder when its cold out!\n\nThis plot it interesting. It shows when the temperature is cold, the wind isn't coming out of the south. Make sense for Syracuse.",
"_____no_output_____"
]
],
[
[
"weather.plot.scatter( x = 'Mean TemperatureF', y = 'WindDirDegrees') ",
"_____no_output_____"
],
[
"weather['Events'].unique()",
"_____no_output_____"
],
[
"weather['Events'] = weather.Events.fillna('None')",
"_____no_output_____"
],
[
"weather['Diff TemperatureF'] = weather['Max TemperatureF'] - weather['Min TemperatureF']",
"_____no_output_____"
],
[
"import matplotlib \nmatplotlib.rcParams['figure.figsize'] = (20.0, 10.0) # larger figure size",
"_____no_output_____"
],
[
"weather['date'] = pd.to_datetime(weather.EST) # make timeseries data",
"_____no_output_____"
],
[
"# let's plot the temperature swings For may 2015\nweather[weather['EST'].str.find(\"2015-5\") >=0 ].plot.line( x = 'date', y = 'Diff TemperatureF')",
"_____no_output_____"
],
[
"w2015 = weather[ weather.date > '2015-01-01']\nw2015.plot.line(x = 'date', y =['Max TemperatureF', 'Min TemperatureF'] )",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a1746e61ac4f4aa8fbd269b4cc8df9d55cd7bee
| 246,220 |
ipynb
|
Jupyter Notebook
|
exercises/minimal_clustering&classification/clustering-gaussian-iris.ipynb
|
kolibril13/data-science-and-big-data-analytics
|
68abb8c8aed3ecd73ad3de62d6f41894893f37a6
|
[
"Apache-2.0"
] | 1 |
2019-12-15T19:42:06.000Z
|
2019-12-15T19:42:06.000Z
|
exercises/minimal_clustering&classification/clustering-gaussian-iris.ipynb
|
kolibril13/data-science-and-big-data-analytics
|
68abb8c8aed3ecd73ad3de62d6f41894893f37a6
|
[
"Apache-2.0"
] | null | null | null |
exercises/minimal_clustering&classification/clustering-gaussian-iris.ipynb
|
kolibril13/data-science-and-big-data-analytics
|
68abb8c8aed3ecd73ad3de62d6f41894893f37a6
|
[
"Apache-2.0"
] | null | null | null | 518.357895 | 80,128 | 0.93057 |
[
[
[
"%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns; sns.set()",
"_____no_output_____"
],
[
"plt.rcParams['figure.dpi'] = 150 ",
"_____no_output_____"
],
[
"from matplotlib.patches import Ellipse # elipse plot for EM-Model\ndef draw_ellipse(position, covariance, ax=None, **kwargs):\n \"\"\"Draw an ellipse with a given position and covariance\"\"\"\n ax = ax or plt.gca()\n \n # Convert covariance to principal axes\n if covariance.shape == (2, 2):\n U, s, Vt = np.linalg.svd(covariance)\n angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))\n width, height = 2 * np.sqrt(s)\n else:\n angle = 0\n width, height = 2 * np.sqrt(covariance)\n \n # Draw the Ellipse\n for nsig in range(1, 4):\n ax.add_patch(Ellipse(position, nsig * width, nsig * height,\n angle, **kwargs))\n \ndef plot_gmm(gmm, X, label=True, ax=None):\n ax = ax or plt.gca()\n labels = gmm.fit(X).predict(X)\n if label:\n ax.scatter(X[:, 0], X[:, 1], c=labels, s=40, cmap='viridis', zorder=2)\n else:\n ax.scatter(X[:, 0], X[:, 1], s=40, zorder=2)\n ax.axis('equal')\n \n w_factor = 0.2 / gmm.weights_.max()\n for pos, covar, w in zip(gmm.means_, gmm.covariances_, gmm.weights_):\n draw_ellipse(pos, covar, alpha=w * w_factor)\n ax.scatter(pos[0],pos[1], color= \"red\",edgecolor='black',marker=\"*\", s=200 , alpha=1, zorder=10)",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nimport pandas as pd\n\niris = sns.load_dataset(\"iris\")\nX=iris[[\"sepal_length\",\"petal_length\"] ].to_numpy()\niris\niris[\"cc\"] = pd.Categorical(iris.species)\ndf=iris\nfor col_name in df.columns:\n if(df[col_name].dtype == 'object'):\n df[col_name]= df[col_name].astype('category')\n df[col_name] = df[col_name].cat.codes\ndf",
"_____no_output_____"
],
[
"# plot\nplt.scatter( X[:, 0], X[:, 1], s=50, c= iris[\"species\"])\nplt.show()",
"_____no_output_____"
],
[
"from sklearn.mixture import GaussianMixture \ngmm = GaussianMixture(n_components=3, n_init=10)",
"_____no_output_____"
],
[
"y_labels= gmm.fit_predict(X)",
"_____no_output_____"
],
[
"y_labels, np.array(df.species)",
"_____no_output_____"
],
[
"plt.scatter(X[:,0], X[:,1], c=y_labels, s=50, cmap=plt.cm.Paired, alpha=0.4)",
"_____no_output_____"
],
[
"gmm.covariances_",
"_____no_output_____"
],
[
"plot_gmm(gmm, X)",
"_____no_output_____"
],
[
"index= []\nbic = []\nfor i in range(1,11):\n gmm = GaussianMixture(n_components=i, n_init=10)\n gmm.fit(X)\n index.append(i)\n bic.append(gmm.bic(X))",
"_____no_output_____"
],
[
"plt.plot(index,bic)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a174da5314c6c0bd265d99570e2dba0f737d8a3
| 3,754 |
ipynb
|
Jupyter Notebook
|
notebooks/readme.ipynb
|
svirpridon/datafiles
|
418113dba98787658eef56c9eb8750a7fab36ec4
|
[
"MIT"
] | null | null | null |
notebooks/readme.ipynb
|
svirpridon/datafiles
|
418113dba98787658eef56c9eb8750a7fab36ec4
|
[
"MIT"
] | null | null | null |
notebooks/readme.ipynb
|
svirpridon/datafiles
|
418113dba98787658eef56c9eb8750a7fab36ec4
|
[
"MIT"
] | null | null | null | 16.90991 | 73 | 0.466436 |
[
[
[
"%%sh\n\nrm -rf inventory",
"_____no_output_____"
]
],
[
[
"# Define a model",
"_____no_output_____"
]
],
[
[
"from datafiles import datafile\n\n@datafile(\"inventory/items/{self.name}.yml\")\nclass InventoryItem:\n \"\"\"Class for keeping track of an item in inventory.\"\"\"\n \n name: str\n unit_price: float\n quantity_on_hand: int = 0\n\n def total_cost(self) -> float:\n return self.unit_price * self.quantity_on_hand",
"_____no_output_____"
],
[
"item = InventoryItem(\"widget\", 3)",
"_____no_output_____"
],
[
"%%sh\n\ncat inventory/items/widget.yml",
"unit_price: 3.0\n"
]
],
[
[
"# Save object changes",
"_____no_output_____"
]
],
[
[
"item.quantity_on_hand += 100",
"_____no_output_____"
],
[
"%%sh\n\ncat inventory/items/widget.yml",
"unit_price: 3.0\nquantity_on_hand: 100\n"
]
],
[
[
"# Load file changes",
"_____no_output_____"
]
],
[
[
"%%writefile inventory/items/widget.yml\n\nunit_price: 2.5 # was 3.0\nquantity_on_hand: 100",
"Overwriting inventory/items/widget.yml\n"
],
[
"item.unit_price",
"_____no_output_____"
]
],
[
[
"# Restore object from file",
"_____no_output_____"
]
],
[
[
"from datafiles import Missing\n\nitem = InventoryItem(\"widget\", Missing)\n\nassert item.unit_price == 2.5\nassert item.quantity_on_hand == 100",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a177dd8520b86d6073e09b2d62ffa24d576535c
| 2,475 |
ipynb
|
Jupyter Notebook
|
Cardio_Catch_Diseases/CCD_PandasProfiling.ipynb
|
daniellecd/portfolio
|
f7d069aeef528c0a6f5854ae337643f78869ee03
|
[
"MIT"
] | 2 |
2020-12-03T01:11:48.000Z
|
2021-06-30T14:45:20.000Z
|
Cardio_Catch_Diseases/CCD_PandasProfiling.ipynb
|
daniellecd/portfolio
|
f7d069aeef528c0a6f5854ae337643f78869ee03
|
[
"MIT"
] | null | null | null |
Cardio_Catch_Diseases/CCD_PandasProfiling.ipynb
|
daniellecd/portfolio
|
f7d069aeef528c0a6f5854ae337643f78869ee03
|
[
"MIT"
] | null | null | null | 26.902174 | 257 | 0.539798 |
[
[
[
"<a href=\"https://colab.research.google.com/github/daniellecd/portfolio/blob/main/Cardio_Catch_Disease/CCD_PandasProfiling.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"#Instação do pandas-profiling\n!pip install --pre -U pandas-profiling",
"_____no_output_____"
],
[
"#Importação das bibliotecas necessárias\nimport numpy as np\nimport pandas as pd\nfrom pandas_profiling import ProfileReport",
"_____no_output_____"
],
[
"#Inclusão do dataset\ndf = pd.read_csv('https://raw.githubusercontent.com/daniellecd/portfolio/master/Cardio_Catch_Diseases/data/cardio_train.csv', sep = ';')",
"_____no_output_____"
],
[
"#Geração do relatório para ser visualizado no próprio ambiente Colab/Jupyter\nprofile = ProfileReport(df, title='Diagnóstico Precoce de Doenças Cardiovasculares - CCD', explorative=True, progress_bar=False)\nprofile.to_notebook_iframe()",
"_____no_output_____"
],
[
"#Exportação do relatório em html\nprofile.to_file(output_file='CCD_pandasprofiling_report.html')",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a1783384d2ebc62873dbd90ee2c679dedd18a6d
| 29,740 |
ipynb
|
Jupyter Notebook
|
Inferential_Statistical_Analysis_with_Python-master/week3/Introduction to Hypothesis Testing in Python.ipynb
|
rezapci/UofM_Statistics_with_Python_Specialization
|
edc31cadcbada20d385ae9b0304b8c0cb7ba83e2
|
[
"MIT"
] | 2 |
2020-05-11T18:39:31.000Z
|
2022-01-26T09:08:02.000Z
|
Inferential_Statistical_Analysis_with_Python-master/week3/Introduction to Hypothesis Testing in Python.ipynb
|
rezapci/UofM_Statistics_with_Python_Specialization
|
edc31cadcbada20d385ae9b0304b8c0cb7ba83e2
|
[
"MIT"
] | null | null | null |
Inferential_Statistical_Analysis_with_Python-master/week3/Introduction to Hypothesis Testing in Python.ipynb
|
rezapci/UofM_Statistics_with_Python_Specialization
|
edc31cadcbada20d385ae9b0304b8c0cb7ba83e2
|
[
"MIT"
] | 2 |
2020-05-11T18:39:17.000Z
|
2020-05-12T14:59:37.000Z
| 32.186147 | 247 | 0.431204 |
[
[
[
"# Hypothesis Testing\n\nFrom lecture, we know that hypothesis testing is a critical tool in determing what the value of a parameter could be.\n\nWe know that the basis of our testing has two attributes:\n\n**Null Hypothesis: $H_0$**\n\n**Alternative Hypothesis: $H_a$**\n\nThe tests we have discussed in lecture are:\n\n* One Population Proportion\n* Difference in Population Proportions\n* One Population Mean\n* Difference in Population Means\n\nIn this tutorial, I will introduce some functions that are extremely useful when calculating a t-statistic and p-value for a hypothesis test.\n\nLet's quickly review the following ways to calculate a test statistic for the tests listed above.\n\nThe equation is:\n\n$$\\frac{Best\\ Estimate - Hypothesized\\ Estimate}{Standard\\ Error\\ of\\ Estimate}$$ \n\nWe will use the examples from our lectures and use python functions to streamline our tests.",
"_____no_output_____"
]
],
[
[
"import statsmodels.api as sm\nimport numpy as np\nimport pandas as pd",
"_____no_output_____"
]
],
[
[
"### One Population Proportion\n\n#### Research Question \n\nIn previous years 52% of parents believed that electronics and social media was the cause of their teenager’s lack of sleep. Do more parents today believe that their teenager’s lack of sleep is caused due to electronics and social media? \n\n**Population**: Parents with a teenager (age 13-18) \n**Parameter of Interest**: p \n**Null Hypothesis:** p = 0.52 \n**Alternative Hypthosis:** p > 0.52 \n\n1018 Parents\n\n56% believe that their teenager’s lack of sleep is caused due to electronics and social media.",
"_____no_output_____"
]
],
[
[
"help(sm.stats.proportions_ztest)",
"Help on function proportions_ztest in module statsmodels.stats.proportion:\n\nproportions_ztest(count, nobs, value=None, alternative='two-sided', prop_var=False)\n Test for proportions based on normal (z) test\n \n Parameters\n ----------\n count : integer or array_like\n the number of successes in nobs trials. If this is array_like, then\n the assumption is that this represents the number of successes for\n each independent sample\n nobs : integer or array-like\n the number of trials or observations, with the same length as\n count.\n value : float, array_like or None, optional\n This is the value of the null hypothesis equal to the proportion in the\n case of a one sample test. In the case of a two-sample test, the\n null hypothesis is that prop[0] - prop[1] = value, where prop is the\n proportion in the two samples. If not provided value = 0 and the null\n is prop[0] = prop[1]\n alternative : string in ['two-sided', 'smaller', 'larger']\n The alternative hypothesis can be either two-sided or one of the one-\n sided tests, smaller means that the alternative hypothesis is\n ``prop < value`` and larger means ``prop > value``. In the two sample\n test, smaller means that the alternative hypothesis is ``p1 < p2`` and\n larger means ``p1 > p2`` where ``p1`` is the proportion of the first\n sample and ``p2`` of the second one.\n prop_var : False or float in (0, 1)\n If prop_var is false, then the variance of the proportion estimate is\n calculated based on the sample proportion. Alternatively, a proportion\n can be specified to calculate this variance. Common use case is to\n use the proportion under the Null hypothesis to specify the variance\n of the proportion estimate.\n \n Returns\n -------\n zstat : float\n test statistic for the z-test\n p-value : float\n p-value for the z-test\n \n Examples\n --------\n >>> count = 5\n >>> nobs = 83\n >>> value = .05\n >>> stat, pval = proportions_ztest(count, nobs, value)\n >>> print('{0:0.3f}'.format(pval))\n 0.695\n \n >>> import numpy as np\n >>> from statsmodels.stats.proportion import proportions_ztest\n >>> count = np.array([5, 12])\n >>> nobs = np.array([83, 99])\n >>> stat, pval = proportions_ztest(counts, nobs)\n >>> print('{0:0.3f}'.format(pval))\n 0.159\n \n Notes\n -----\n This uses a simple normal test for proportions. It should be the same as\n running the mean z-test on the data encoded 1 for event and 0 for no event\n so that the sum corresponds to the count.\n \n In the one and two sample cases with two-sided alternative, this test\n produces the same p-value as ``proportions_chisquare``, since the\n chisquare is the distribution of the square of a standard normal\n distribution.\n\n"
],
[
"n = 1018\npnull = .52\nphat = .56\nsm.stats.proportions_ztest(phat * n, n, pnull)",
"_____no_output_____"
]
],
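[
[
"As a sanity check, here is a minimal sketch computing the same z-statistic by hand from the general formula above. It uses the sample proportion in the standard error (matching the default `prop_var=False` behavior of `proportions_ztest`); the one-sided p-value is shown since the alternative is p > 0.52:",
"_____no_output_____"
]
],
[
[
"from scipy.stats import norm\n\nse = np.sqrt(phat * (1 - phat) / n)  # standard error based on the sample proportion\nz = (phat - pnull) / se              # (best estimate - hypothesized estimate) / SE\n(z, norm.sf(z))                      # one-sided p-value",
"_____no_output_____"
]
],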
[
[
"### Difference in Population Proportions\n\n#### Research Question\n\nIs there a significant difference between the population proportions of parents of black children and parents of Hispanic children who report that their child has had some swimming lessons?\n\n**Populations**: All parents of black children age 6-18 and all parents of Hispanic children age 6-18 \n**Parameter of Interest**: p1 - p2, where p1 = black and p2 = hispanic \n**Null Hypothesis:** p1 - p2 = 0 \n**Alternative Hypthosis:** p1 - p2 $\\neq$ 0 \n\n247 Parents of Black Children\n36.8% of parents report that their child has had some swimming lessons.\n\n308 Parents of Hispanic Children\n38.9% of parents report that their child has had some swimming lessons.",
"_____no_output_____"
]
],
[
[
"help(sm.stats.ttest_ind)",
"Help on function ttest_ind in module statsmodels.stats.weightstats:\n\nttest_ind(x1, x2, alternative='two-sided', usevar='pooled', weights=(None, None), value=0)\n ttest independent sample\n \n convenience function that uses the classes and throws away the intermediate\n results,\n compared to scipy stats: drops axis option, adds alternative, usevar, and\n weights option\n \n Parameters\n ----------\n x1, x2 : array_like, 1-D or 2-D\n two independent samples, see notes for 2-D case\n alternative : string\n The alternative hypothesis, H1, has to be one of the following\n \n 'two-sided': H1: difference in means not equal to value (default)\n 'larger' : H1: difference in means larger than value\n 'smaller' : H1: difference in means smaller than value\n \n usevar : string, 'pooled' or 'unequal'\n If ``pooled``, then the standard deviation of the samples is assumed to be\n the same. If ``unequal``, then Welsh ttest with Satterthwait degrees\n of freedom is used\n weights : tuple of None or ndarrays\n Case weights for the two samples. For details on weights see\n ``DescrStatsW``\n value : float\n difference between the means under the Null hypothesis.\n \n \n Returns\n -------\n tstat : float\n test statisic\n pvalue : float\n pvalue of the t-test\n df : int or float\n degrees of freedom used in the t-test\n\n"
],
[
"n1 = 247\np1 = .37\n\nn2 = 308\np2 = .39\n\npopulation1 = np.random.binomial(1, p1, n1)\npopulation2 = np.random.binomial(1, p2, n2)\n\nsm.stats.ttest_ind(population1, population2)",
"_____no_output_____"
]
],
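[
[
"An alternative sketch: since these are proportions, the same question can also be asked with a two-sample proportions z-test on the (approximate) observed counts. The rounded counts below are reconstructed from the reported percentages and are illustrative:",
"_____no_output_____"
]
],
[
[
"counts = np.array([round(0.368 * 247), round(0.389 * 308)])  # successes in each group\nnobs = np.array([247, 308])                                  # sample sizes\n\nsm.stats.proportions_ztest(counts, nobs)  # H0: p1 - p2 = 0",
"_____no_output_____"
]
],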
[
[
"### One Population Mean\n\n#### Research Question \n\nIs the average cartwheel distance (in inches) for adults \nmore than 80 inches?\n\n**Population**: All adults \n**Parameter of Interest**: $\\mu$, population mean cartwheel distance.\n**Null Hypothesis:** $\\mu$ = 80\n**Alternative Hypthosis:** $\\mu$ > 80\n\n25 Adults\n\n$\\mu = 82.46$\n\n$\\sigma = 15.06$",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv(\"Cartwheeldata.csv\")\ndf.head()",
"_____no_output_____"
],
[
"n = len(df)\nmean = df[\"CWDistance\"].mean()\nsd = df[\"CWDistance\"].std()\n(n, mean, sd)",
"_____no_output_____"
],
[
"help(sm.stats.proportions_ztest)",
"Help on function proportions_ztest in module statsmodels.stats.proportion:\n\nproportions_ztest(count, nobs, value=None, alternative='two-sided', prop_var=False)\n Test for proportions based on normal (z) test\n \n Parameters\n ----------\n count : integer or array_like\n the number of successes in nobs trials. If this is array_like, then\n the assumption is that this represents the number of successes for\n each independent sample\n nobs : integer or array-like\n the number of trials or observations, with the same length as\n count.\n value : float, array_like or None, optional\n This is the value of the null hypothesis equal to the proportion in the\n case of a one sample test. In the case of a two-sample test, the\n null hypothesis is that prop[0] - prop[1] = value, where prop is the\n proportion in the two samples. If not provided value = 0 and the null\n is prop[0] = prop[1]\n alternative : string in ['two-sided', 'smaller', 'larger']\n The alternative hypothesis can be either two-sided or one of the one-\n sided tests, smaller means that the alternative hypothesis is\n ``prop < value`` and larger means ``prop > value``. In the two sample\n test, smaller means that the alternative hypothesis is ``p1 < p2`` and\n larger means ``p1 > p2`` where ``p1`` is the proportion of the first\n sample and ``p2`` of the second one.\n prop_var : False or float in (0, 1)\n If prop_var is false, then the variance of the proportion estimate is\n calculated based on the sample proportion. Alternatively, a proportion\n can be specified to calculate this variance. Common use case is to\n use the proportion under the Null hypothesis to specify the variance\n of the proportion estimate.\n \n Returns\n -------\n zstat : float\n test statistic for the z-test\n p-value : float\n p-value for the z-test\n \n Examples\n --------\n >>> count = 5\n >>> nobs = 83\n >>> value = .05\n >>> stat, pval = proportions_ztest(count, nobs, value)\n >>> print('{0:0.3f}'.format(pval))\n 0.695\n \n >>> import numpy as np\n >>> from statsmodels.stats.proportion import proportions_ztest\n >>> count = np.array([5, 12])\n >>> nobs = np.array([83, 99])\n >>> stat, pval = proportions_ztest(counts, nobs)\n >>> print('{0:0.3f}'.format(pval))\n 0.159\n \n Notes\n -----\n This uses a simple normal test for proportions. It should be the same as\n running the mean z-test on the data encoded 1 for event and 0 for no event\n so that the sum corresponds to the count.\n \n In the one and two sample cases with two-sided alternative, this test\n produces the same p-value as ``proportions_chisquare``, since the\n chisquare is the distribution of the square of a standard normal\n distribution.\n\n"
],
[
"sm.stats.ztest(df[\"CWDistance\"], value = 80, alternative = \"larger\")",
"_____no_output_____"
]
],
[
[
"### Difference in Population Means\n\n#### Research Question \n\nConsidering adults in the NHANES data, do males have a significantly higher mean Body Mass Index than females?\n\n**Population**: Adults in the NHANES data. \n**Parameter of Interest**: $\\mu_1 - \\mu_2$, Body Mass Index. \n**Null Hypothesis:** $\\mu_1 = \\mu_2$ \n**Alternative Hypthosis:** $\\mu_1 \\neq \\mu_2$\n\n2976 Females \n$\\mu_1 = 29.94$ \n$\\sigma_1 = 7.75$ \n\n2759 Male Adults \n$\\mu_2 = 28.78$ \n$\\sigma_2 = 6.25$ \n\n$\\mu_1 - \\mu_2 = 1.16$",
"_____no_output_____"
]
],
[
[
"url = \"nhanes_2015_2016.csv\"\nda = pd.read_csv(url)\nda.head()",
"_____no_output_____"
],
[
"females = da[da[\"RIAGENDR\"] == 2]\nmale = da[da[\"RIAGENDR\"] == 1]",
"_____no_output_____"
],
[
"n1 = len(females)\nmu1 = females[\"BMXBMI\"].mean()\nsd1 = females[\"BMXBMI\"].std()\n\n(n1, mu1, sd1)",
"_____no_output_____"
],
[
"n2 = len(male)\nmu2 = male[\"BMXBMI\"].mean()\nsd2 = male[\"BMXBMI\"].std()\n\n(n2, mu2, sd2)",
"_____no_output_____"
],
[
"sm.stats.ztest(females[\"BMXBMI\"].dropna(), male[\"BMXBMI\"].dropna())",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a17a000041dc44b6e571128f4b1755a0c0b34b9
| 24,153 |
ipynb
|
Jupyter Notebook
|
lectures/Lecture6-density-estimation.ipynb
|
uw-astro/astr-598a-win22
|
65e0f366e164c276f1dfc06873741c6f6c94b300
|
[
"BSD-3-Clause"
] | null | null | null |
lectures/Lecture6-density-estimation.ipynb
|
uw-astro/astr-598a-win22
|
65e0f366e164c276f1dfc06873741c6f6c94b300
|
[
"BSD-3-Clause"
] | null | null | null |
lectures/Lecture6-density-estimation.ipynb
|
uw-astro/astr-598a-win22
|
65e0f366e164c276f1dfc06873741c6f6c94b300
|
[
"BSD-3-Clause"
] | 1 |
2022-01-10T16:01:50.000Z
|
2022-01-10T16:01:50.000Z
| 38.096215 | 431 | 0.547096 |
[
[
[
"### University of Washington: Machine Learning and Statistics \n\n# Lecture 6: Density Estimation 1\n\nAndrew Connolly and Stephen Portillo",
"_____no_output_____"
],
[
"##### Resources for this notebook include:\n- [Textbook](https://press.princeton.edu/books/hardcover/9780691198309/statistics-data-mining-and-machine-learning-in-astronomy) Chapter 8. \n- [astroML website](https://www.astroml.org/index.html)\n\nThis notebook is developed based on material from A. Connolly, Z. Ivezic, M. Juric, S. Portillo, G. Richards, B. Sipocz, J. VanderPlas, D. Hogg, Killian Weinberger and many others.\n\nThe notebook and assoociated material are available from [github](https://github.com/uw-astro/astr-598a-win22).\n\nMake sure you are using the latest version of astroML\n\n> pip install --pre -U astroml",
"_____no_output_____"
],
[
"<a id='toc'></a>\n\n## This notebook includes:\n\n[Introduction to Clustering ](#basics) \n\n[1-D hypothesis testing](#1Dht)\n\n[K-means clustering algorithm](#kmeans) \n\n[Kernel Density Estimation](#kde)\n\n[K-nearest neighbors](#knn) \n\n\n\n",
"_____no_output_____"
],
[
"## Introduction to Clustering <a id='basics'></a>\n[Go to top](#toc)\n\n“Clustering” in astronomy refers to a number of different aspects of data analysis. Given a multivariate point data set, we can ask whether it displays any structure, that is, concentrations of points. Alternatively, when a density estimate is available we can search for “overdensities”. Another way to interpret clustering is to seek a partitioning or segmentation of data into smaller parts according to some criteria. \n\n",
"_____no_output_____"
],
[
"#### Unsupervised vs. Supervised Classification \n\nIn density estimation, we estimate joint probability distributions from multivariate data sets to identify the inherent clustering. This is essentially **unsupervised classification**. Here “unsupervised” means that there is no prior information about the number and properties of clusters. In other words, this method is a search for unknown structure in your (multi-dimensional) dataset.\n\nIf we have labels for some of these data points (e.g., an object is tall, short, red, or blue), we can develop a relationship between the label and the properties of a source. This is **supervised classification**. In other words, this method is finding objects in your (multi-dimensional) dataset that \"look like\" objects in your training set. \n\nClassification, regression, and density estimation are all related. For example, the regression function $\\hat{y} = f(y|\\vec{x})$ is the best estimated value of $y$ given a value of $\\vec{x}$. In classification $y$ is categorical and $f(y|\\vec{x})$ is called the _discriminant function_\n ",
"_____no_output_____"
],
[
"## 1-D hypothesis testing <a id='1Dht'></a>\n[Go to top](#toc)\n\nHow do we decide about the existance of a cluster? Let's start with\nthe simplest but fundamental example: 1-D hypothesis testing.\n\n\n**Motivating question:** You just measured x = 3, with a negligible measurement error.\n\nYou know that you could have drawn this value from one of two possible populations (e.g. stars and galaxies). One population can be described as N(0,2), and the other one as N(4,1). \n\nWhich population is more likely, given your x? \n\nNaive answer: 3 is closer to 4 (\"1 $\\sigma$ away\") than to 0\n(\"1.5 $\\sigma$ away\") so the second population is more likely.\n\nLet's see why this answer is wrong...\n ",
"_____no_output_____"
],
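[
"A minimal numerical sketch of this comparison (the prior ratio below is purely illustrative):\n\n```python\nfrom scipy.stats import norm\n\nx = 3.0\nlike_1 = norm(0, 2).pdf(x)  # population 1: N(0, 2) -> ~0.065\nlike_2 = norm(4, 1).pdf(x)  # population 2: N(4, 1) -> ~0.242\n\nprint(like_2 / like_1)  # ~3.7: with equal priors, N(4, 1) wins\n\n# But priors matter: if population 1 is, say, 10 times more numerous,\n# the posterior odds flip in its favor.\nprior_1, prior_2 = 10.0, 1.0  # illustrative prior ratio\nprint((prior_2 * like_2) / (prior_1 * like_1))  # < 1: population 1 now more probable\n```",
"_____no_output_____"
],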
[
"If the underlying distribution, h(x), is the sum of two populations\n\n$$h(x) = (1-a) h_B (x) + a h_S (x) $$\n\nwith $a$ the normalization coefficient. Given ${x_i}$ we want to know $p_S(x_i)$ (which means $p_B(x_i) = 1 - p_S(x_i)$)\n\n",
"_____no_output_____"
],
[
"We can choose a classification boundary, $x_c$. From this we can defined the expected number of spurious sources (false positives or Type I errors)\n\n$$n_{spurious} = N(1-a) \\int_{x_c}^{\\infty} h_B(x)dx $$\n\nand the number of missed (false negative or Type II errors) \n\n$$n_{missed} = N a \\int_{0}^{x_c} h_S(x)dx $$\n\nNumber of sources will be \n$$n_{sources} = N a - n_{missed} + n_{spurious} $$\n\nThe completeness of the sample (sometimes called the recall or sensitivity) is then \n\n$$\\eta = \\frac{N a - n_{missed} }{N a} = 1 - \\int_{0}^{x_c} h_S(x)dx $$\n\nand the contamination of the sample is \n\n$$\\epsilon = \\frac{n_{spurious}}{n_{source}} $$",
"_____no_output_____"
],
[
"and the decision boundary is the $x$ value at which each class is equally likely,\n\n$$\\pi_1 p_1(x) = \\pi_2 p_2(x) $$\n\n\n$\\pi_i$ is the prior on the object being in class $i$ (estimated from the relative numbers of sources in each class). The form of $h_S$ and $h_B$ and the priors are needed in deciding the classification threshold ",
"_____no_output_____"
],
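[
"A minimal sketch putting these pieces together numerically for the example populations above (the values of $a$, $N$ and $x_c$ are illustrative):\n\n```python\nfrom scipy.stats import norm\nfrom scipy.optimize import brentq\n\nh_B, h_S = norm(0, 2), norm(4, 1)  # background and source populations\na, N, x_c = 0.1, 1000, 3.0         # illustrative source fraction, sample size, threshold\n\nn_spurious = N * (1 - a) * h_B.sf(x_c)   # false positives above x_c\nn_missed = N * a * h_S.cdf(x_c)          # false negatives below x_c (the integral from 0 is ~ the cdf here)\neta = 1 - h_S.cdf(x_c)                   # completeness\neps = n_spurious / (N * a - n_missed + n_spurious)  # contamination\n\n# decision boundary: solve pi_S h_S(x) = pi_B h_B(x) with priors taken from a\nx_b = brentq(lambda x: a * h_S.pdf(x) - (1 - a) * h_B.pdf(x), 0, 6)\nprint(eta, eps, x_b)\n```",
"_____no_output_____"
],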
[
"## K-means clustering algorithm <a id='kmeans'></a>\n[Go to top](#toc)\n\n ",
"_____no_output_____"
],
[
"Question is: how do we find clusters or estimate density efficiently?\n\n<u> The _K-means_ algorithm </u>\n\nThe first approach for finding clusters that is always taught is $K$-means (simple and works well)\n\n$K$-means partitions points into $K$ disjoint subsets ($C_k$) with each subset containing $N_k$\npoints \n\n\nIt minimizes the objective/cost/likelihood function,\n$\\sum_{k=1}^K \\sum_{i \\in C_k} || x_i - \\mu_k ||^2$\n\n$\\mu_k = \\frac{1}{N_k} \\sum_{i \\in C_k} x_i$ is the mean of the\npoints in set $C_k$\n\n\n_Procedure:_\n\n1. define the number of clusters $K$\n2. choose the centroid, $\\mu_k$, of each of the $K$ clusters\n3. assign each point to the cluster that it is closest to\n4. update the centroid of each cluster by recomputing $\\mu_k$ according to the new assignments.\n5. goto (3) until there are no new assignments.\n\nGlobal optima are not guaranteed but the process never increases the sum-of-squares error.\n\nTypically we run multiple times with different starting values for the\ncentroids of $C_k$.\n\nWe will start with looking at the density of stars as a function of metalicity and use scikit-learns preprocessing. We use the StandardScaler function to normalize each feature",
"_____no_output_____"
]
],
[
[
"def warn(*args, **kwargs):\n pass\nimport warnings\nwarnings.warn = warn",
"_____no_output_____"
],
[
"%matplotlib inline\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib.patches import Ellipse\nfrom scipy.stats import norm\n\nfrom sklearn.cluster import KMeans\nfrom sklearn import preprocessing\n\nfrom astroML.datasets import fetch_sdss_sspp\n\n#------------------------------------------------------------\n# Get data\ndata = fetch_sdss_sspp(cleaned=True)\nX = np.vstack([data['FeH'], data['alphFe']]).T\n\n# truncate dataset for speed\nX = X[::5]\n\n#------------------------------------------------------------\n# Compute a 2D histogram of the input\n# Fe vs H\n#O, Ne, Mg, Si, S, Ar, Ca, and Ti vs Fe\nH, FeH_bins, alphFe_bins = np.histogram2d(data['FeH'], data['alphFe'], 50)\n\n#------------------------------------------------------------\n# Compute the KMeans clustering\nn_clusters = 2\n\nscaler = preprocessing.StandardScaler()\nclf = KMeans(n_clusters)\nclf.fit(scaler.fit_transform(X.astype(\"float\")))\n\n#------------------------------------------------------------\n# Visualize the results\nfig = plt.figure(figsize=(6, 6))\nax = fig.add_subplot()\n\n# plot density\nax = plt.axes()\nax.imshow(H.T, origin='lower', interpolation='nearest', aspect='auto',\n extent=[FeH_bins[0], FeH_bins[-1],\n alphFe_bins[0], alphFe_bins[-1]],\n cmap=plt.cm.binary)\n\n# plot cluster centers\ncluster_centers = scaler.inverse_transform(clf.cluster_centers_)\nax.scatter(cluster_centers[:, 0], cluster_centers[:, 1],\n s=40, c='w', edgecolors='k')\n\n# plot cluster boundaries\nFeH_centers = 0.5 * (FeH_bins[1:] + FeH_bins[:-1])\nalphFe_centers = 0.5 * (alphFe_bins[1:] + alphFe_bins[:-1])\n\nXgrid = np.meshgrid(FeH_centers, alphFe_centers)\nXgrid = np.array(Xgrid).reshape((2, 50 * 50)).T\n\nH = clf.predict(scaler.transform(Xgrid)).reshape((50, 50))\n\nfor i in range(n_clusters):\n Hcp = H.copy()\n flag = (Hcp == i)\n Hcp[flag] = 1\n Hcp[~flag] = 0\n\n ax.contour(FeH_centers, alphFe_centers, Hcp, [-0.5, 0.5],\n linewidths=2, colors='k')\n\nax.xaxis.set_major_locator(plt.MultipleLocator(0.3))\nax.set_xlim(-1.101, 0.101)\nax.set_ylim(alphFe_bins[0], alphFe_bins[-1])\n\nax.set_xlabel(r'$\\rm [Fe/H]$')\nax.set_ylabel(r'$\\rm [\\alpha/Fe]$')\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"***How do you choose the number of clusters?***",
"_____no_output_____"
],
[
"## Kernel Density Estimation <a id='kde'></a>\n\n[Go to top](#toc)\n\n\n$N(x) = \\frac{1}{Nh^D} \\sum_{i=1}^N K\\left( \\frac{d(x,x_i)}{h} \\right),$\n\nK: kernel (defined by the bandwidth h) is any smooth function which is positive at all values\n\nToo narrow a kernel, too spiky the results (high variance)\n\nToo broad a kernel, too smooth or washed out the results (bias)\n\n_Common kernels_\n\nSquard exponential (Normal): $ K(u) = \\frac{1}{(2\\pi)^{D/2}} e^{- u^2 / 2}$ D: dimension\n\nTophat: $ K(u) = \\left\\{\n \\begin{array}{ll}\n \\frac{1}{V_D(r)} & {\\rm if}\\ u \\le r,\\\\\n 0 & {\\rm if}\\ u > r,\n \\end{array}\n \\right.$\n \nExponential: $ K(u) = \\frac{1}{D!\\, V_D(r)}e^{-|u|}$ \n\nwith $V_D(r)$ the volume of a hypersphere radius $r$; $V_D(r) = \\frac{2r^D\\pi^{D/2}}{D\\ \\Gamma(D/2)}$\n\n<img src=\"figures/funcs.png\">\n\nPerhaps surprisingly the primary feature is the bandwidth of these distributions not the exact shape. Choosing the bandwidth is usually done through cross-validation\n\n\n",
"_____no_output_____"
],
[
"To demonstrate this, the plot projects galaxies in SDSS \"Great Wall\" as scatted points by their spatial locations onto the equatorial plane (declination ~ $0^o$). The graph below shows the location of each point, but it is hard to get \"clustered information\" from.",
"_____no_output_____"
]
],
[
[
"from matplotlib.colors import LogNorm\nfrom sklearn.neighbors import KernelDensity\nfrom astroML.datasets import fetch_great_wall\n\n#----------------------------------------------------------------------\n# This function adjusts matplotlib settings for a uniform feel in the textbook.\n# Note that with usetex=True, fonts are rendered with LaTeX. This may\n# result in an error if LaTeX is not installed on your system. In that case,\n# you can set usetex to False.\n\n#------------------------------------------------------------\n# Fetch the great wall data\nX = fetch_great_wall()\n\n#------------------------------------------------------------\n# Create the grid on which to evaluate the results\nNx = 50\nNy = 125\nxmin, xmax = (-375, -175)\nymin, ymax = (-300, 200)\n\n#------------------------------------------------------------\n# Evaluate for several models\nXgrid = np.vstack(map(np.ravel, np.meshgrid(np.linspace(xmin, xmax, Nx),\n np.linspace(ymin, ymax, Ny)))).T\n\nkernels = ['gaussian']\ndens = []\n\nbandwidth=5\nkde = KernelDensity(bandwidth=bandwidth, kernel='gaussian')\nlog_dens = kde.fit(X).score_samples(Xgrid)\ndens = X.shape[0] * np.exp(log_dens).reshape((Ny, Nx))\n\n#------------------------------------------------------------\n# Plot the results\nfig = plt.figure(figsize=(15, 8))\nfig.subplots_adjust(left=0.12, right=0.95, bottom=0.2, top=0.9,\n hspace=0.01, wspace=0.01)\n\n# First plot: scatter the points\nax1 = plt.subplot(221, aspect='equal')\nax1.scatter(X[:, 1], X[:, 0], s=1, lw=0, c='k')\nax1.text(0.95, 0.9, \"input\", ha='right', va='top',\n transform=ax1.transAxes,\n bbox=dict(boxstyle='round', ec='k', fc='w'))\n\n# Second plot: gaussian kernel\nax2 = plt.subplot(222, aspect='equal')\nax2.imshow(dens.T, origin='lower', norm=LogNorm(),\n extent=(ymin, ymax, xmin, xmax), cmap=plt.cm.binary)\nax2.text(0.95, 0.9, \"Gaussian h={}\".format(bandwidth), ha='right', va='top',\n transform=ax2.transAxes,\n bbox=dict(boxstyle='round', ec='k', fc='w'))\n\nfor ax in [ax1, ax2]:\n ax.set_xlim(ymin, ymax - 0.01)\n ax.set_ylim(xmin, xmax)\n\nfor ax in [ax1, ax2]:\n ax.xaxis.set_major_formatter(plt.NullFormatter())\n ax.set_xlabel('$y$ (Mpc)')\n\nfor ax in [ax2]:\n ax.yaxis.set_major_formatter(plt.NullFormatter())\n\nfor ax in [ax1]:\n ax.set_ylabel('$x$ (Mpc)')\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"##### Exercise: Use Kernel Density Estimation with any kernel you choose on the color-magnitude diagrams (CMDs) of the two data sets Field A and Field B. Plot the density for each CMD in each panel (i.e., $g-r$ on the x axis and $g$ on the y axis) - a Hess diagrams.\n\nExperiment with different kernel bandwidths, plotting one that visually seems \"best\" (i.e., a good balance of bias vs. variance) for each kernel.\n\nDon't forget to change the figure size so that individual panels have aspect ratios closer to what is common for color-magnitude diagrams (i.e., x:y ~ 4:6 or so).\n\nSubtract the \"best\" density for Field B from A to see if there are structures present in the CMD. What are they?",
"_____no_output_____"
]
],
[
[
"#Hess diagrams with SDSS data\nimport pandas as pd\nfieldA = pd.read_csv('data/fieldA.csv')\nfieldB = pd.read_csv('data/fieldB.csv')\n\n# Add a column for color\nfieldA['g-r'] = fieldA.g - fieldA.r\nfieldB['g-r'] = fieldB.g - fieldB.r\n\nfig = plt.figure(figsize=(8, 8))\nax = fig.add_subplot(121, aspect='equal')\nax.scatter(fieldA['g-r'],fieldA['g'], s=1, alpha=0.5)\nax.set_xlim(-0.5, 2)\nax.set_ylim(22,11)\nax.set_xlabel('g-r')\nax.set_ylabel('g')\n\nax = fig.add_subplot(122, aspect='equal')\nax.scatter(fieldB['g-r'],fieldB['g'], s=1, alpha=0.5)\nax.set_xlim(-0.5, 2)\nax.set_ylim(22,11)\nax.set_xlabel('g-r')\nax.set_ylabel('g')\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Nearest neighbor estimation <a id='knn'></a>\n\n[Go to top](#toc)\n\nSimple (simplest?) density estimator heavily used in astrophysics (cluster detection, large scale structure measures), originally proposed by [Dressler et al. 1980](https://ui.adsabs.harvard.edu/abs/1980ApJ...236..351D/abstract) . \n\nFor each point we find the distance to the $K$th-nearest neighbor, $d_K$. **Note: we are not choosing clusters here** In this method, the implied point density at an arbitrary position x is estimated as\n\n$$\\hat{f_K}(x) = \\frac{K}{V_D(d_K)}$$\n\nwhere $V_D$ is evaluated volume, and D is the problem dimensionality. \n \nBy taking the assumption that the underlying density field is locally constant, we can further simplify this method as\n\n$$\\hat{f_K}(x) = \\frac{C}{d_K^D}$$\n\nwhere C is a scaling factor evaluated by requiring that the sum of the product of $\\hat{f_K}(x)$ and\npixel volume is equal to the total number of data points.\n\nThe error on $\\hat{f}_K(x)$ is \n\n$$\\sigma_f = K^{1/2}/V_D (d_K)$$\n\nThe fractional (relative) error is \n\n$$\\sigma_f/\\hat{f} = 1/K^{1/2}$$.\n\nWe can see that the\n* fractional accuracy increases with $K$ at expense of the spatial resolution (bias-variance trade-off)\n* effective resolution scales with $K^{1/D}$\n\nThe method can be improved by considering distances to _all_ $K$ nearest neighbors \n\n$$\\hat{f}_K(x) = {C \\over \\sum_{i=1}^K d_i^D}$$\n\nThe normalization when computing local density without regard to overall mean density is\n\n$$C = \\frac{K\\, (K + 1)}{2 V_D(r)}$$\n\nIn this method, we can change parameter k to get different estimation result. k should be at least 5 because the estimator is biased and has a large variance for smaller k; see [Casertano, S. and Hut, P.](https://ui.adsabs.harvard.edu/abs/1985ApJ...298...80C/abstract)",
"_____no_output_____"
]
],
[
[
"from sklearn.neighbors import KernelDensity\nfrom astroML.density_estimation import KNeighborsDensity\n\n#------------------------------------------------------------\n# Create the grid on which to evaluate the results\nNx = 50\nNy = 125\nxmin, xmax = (-375, -175)\nymin, ymax = (-300, 200)\n\n#------------------------------------------------------------\n# Evaluate for several models\nXgrid = np.vstack(map(np.ravel, np.meshgrid(np.linspace(xmin, xmax, Nx),\n np.linspace(ymin, ymax, Ny)))).T\n\nkde = KernelDensity(kernel='gaussian', bandwidth=5)\nlog_pdf_kde = kde.fit(X).score_samples(Xgrid).reshape((Ny, Nx))\ndens_KDE = np.exp(log_pdf_kde)\n\nknn5 = KNeighborsDensity('bayesian', 5)\ndens_k5 = knn5.fit(X).eval(Xgrid).reshape((Ny, Nx))\n\nknn40 = KNeighborsDensity('bayesian', 40)\ndens_k40 = knn40.fit(X).eval(Xgrid).reshape((Ny, Nx))\n\n#------------------------------------------------------------\n# Plot the results\nfig = plt.figure(figsize=(9, 4.0))\nfig.subplots_adjust(left=0.1, right=0.95, bottom=0.14, top=0.9,\n hspace=0.01, wspace=0.01)\n\n# First plot: scatter the points\nax1 = plt.subplot(221, aspect='equal')\nax1.scatter(X[:, 1], X[:, 0], s=1, lw=0, c='k')\nax1.text(0.98, 0.95, \"input\", ha='right', va='top',\n transform=ax1.transAxes, fontsize=12,\n bbox=dict(boxstyle='round', ec='k', fc='w'))\n\n# Second plot: KDE\nax2 = plt.subplot(222, aspect='equal')\nax2.imshow(dens_KDE.T, origin='lower', norm=LogNorm(),\n extent=(ymin, ymax, xmin, xmax), cmap=plt.cm.binary)\nax2.text(0.98, 0.95, \"KDE: gaussian $(h=5)$\", ha='right', va='top',\n transform=ax2.transAxes, fontsize=12,\n bbox=dict(boxstyle='round', ec='k', fc='w'))\n\n# Third plot: KNN, k=5\nax3 = plt.subplot(223, aspect='equal')\nax3.imshow(dens_k5.T, origin='lower', norm=LogNorm(),\n extent=(ymin, ymax, xmin, xmax), cmap=plt.cm.binary)\nax3.text(0.98, 0.95, \"KNN $(k=5)$\", ha='right', va='top',\n transform=ax3.transAxes, fontsize=12,\n bbox=dict(boxstyle='round', ec='k', fc='w'))\n\n# Fourth plot: KNN, k=40\nax4 = plt.subplot(224, aspect='equal')\nax4.imshow(dens_k40.T, origin='lower', norm=LogNorm(),\n extent=(ymin, ymax, xmin, xmax), cmap=plt.cm.binary)\nax4.text(0.98, 0.95, \"KNN $(k=40)$\", ha='right', va='top',\n transform=ax4.transAxes, fontsize=12,\n bbox=dict(boxstyle='round', ec='k', fc='w'))\n\nfor ax in [ax1, ax2, ax3, ax4]:\n ax.set_xlim(ymin, ymax - 0.01)\n ax.set_ylim(xmin, xmax)\n\nfor ax in [ax1, ax2]:\n ax.xaxis.set_major_formatter(plt.NullFormatter())\n\nfor ax in [ax3, ax4]:\n ax.set_xlabel('$y$ (Mpc)')\n\nfor ax in [ax2, ax4]:\n ax.yaxis.set_major_formatter(plt.NullFormatter())\n\nfor ax in [ax1, ax3]:\n ax.set_ylabel('$x$ (Mpc)')\n\nplt.show()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a17a04369738310afc3b7d31461a407de8b64c7
| 9,332 |
ipynb
|
Jupyter Notebook
|
dd_1/Part 1/Section 09 - Modules, Packages and Namespaces/02 - How Python Imports Modules/How does Python import Modules.ipynb
|
rebekka-halal/bg
|
616a40286fe1d34db2916762c477676ed8067cdb
|
[
"Apache-2.0"
] | null | null | null |
dd_1/Part 1/Section 09 - Modules, Packages and Namespaces/02 - How Python Imports Modules/How does Python import Modules.ipynb
|
rebekka-halal/bg
|
616a40286fe1d34db2916762c477676ed8067cdb
|
[
"Apache-2.0"
] | null | null | null |
dd_1/Part 1/Section 09 - Modules, Packages and Namespaces/02 - How Python Imports Modules/How does Python import Modules.ipynb
|
rebekka-halal/bg
|
616a40286fe1d34db2916762c477676ed8067cdb
|
[
"Apache-2.0"
] | null | null | null | 26.287324 | 957 | 0.562259 |
[
[
[
"### How does Python import Modules?",
"_____no_output_____"
],
[
"When we run a statement such as \n\n`import fractions`\n\nwhat is Python actually doing?",
"_____no_output_____"
],
[
"The first thing to note is that Python is doing the import at **run time**, i.e. while your code is actually running.\n\nThis is different from traditional compiled languages such as C where modules are compiled and linked at compile time.\n\nIn both cases though, the system needs to know **where** those code files exist.\n\nPython uses a relatively complex system of how to find and load modules. I'm not going to even attempt to describe this in detail, but we'll take a brief look at the main points.",
"_____no_output_____"
],
[
"The `sys` module has a few properties that define where Python is going to look for modules (either built-in or standard library as well as our own or 3rd party):",
"_____no_output_____"
]
],
[
[
"import sys",
"_____no_output_____"
]
],
[
[
"Where is Python installed?",
"_____no_output_____"
]
],
[
[
"sys.prefix",
"_____no_output_____"
]
],
[
[
"Where are the compiled C binaries located?",
"_____no_output_____"
]
],
[
[
"sys.exec_prefix",
"_____no_output_____"
]
],
[
[
"These two properties are how virtual environments are basically able to work with different environments. Python is installed to a different set of directories, and these prefixes are manipulated to reflect the current Python location.",
"_____no_output_____"
],
[
"Where does Python look for imports?",
"_____no_output_____"
]
],
[
[
"sys.path",
"_____no_output_____"
]
],
[
[
"Basically when we import a module, Python will search for the module in the paths contained in `sys.path`. \n\nIf it does not find the module in one of those paths, the import will fail.\n\nSo if you ever run into a problem where Python is not able to import a module or package, you should check this first to make sure the path to your module/package is in that list.",
"_____no_output_____"
],
[
"At a high level, this is how Python imports a module from file:",
"_____no_output_____"
],
[
"* checks the `sys.modules` cache to see if the module has already been imported - if so it simply uses the reference in there, otherwise:\n* creates a new module object (`types.ModuleType`)\n* loads the source code from file\n* adds an entry to `sys.modules` with name as key and the newly created\n* compiles and executes the source code",
"_____no_output_____"
],
[
"One thing that's really to important to note is that when a module is imported, the module code is **executed**.",
"_____no_output_____"
],
[
"Let's switch over to PyCharm (or your favorite IDE, which may well be VI/emacs and the command line!). All the files are included in the lecture resources or my github repository.",
"_____no_output_____"
],
[
"#### Example 1",
"_____no_output_____"
],
[
"This example shows that when we import a module, the module code is actually **executed**.\n\nFurthermore, that module now has its own namespace that can be seen in `__dict__`.",
"_____no_output_____"
],
[
"#### Example 2",
"_____no_output_____"
],
[
"In this example, we can see that when we `import` a module, Python first looks for it in `sys.modules`.\n\nTo make the point, we put a key/value pair in `sys.modules` ourselves, and then import it.\n\nIn fact we put a function in there instead of a module, and import that.\n\nPlease **DO NOT** this, I'm just making the point that `import` will first look in the cache and immediately just return the object if the name is found, basically just as if we had written:\n\n`\nmodule = sys.modules['module']\n`",
"_____no_output_____"
]
],
[
[
"sys.modules['test'] = lambda: 'Testing module caching'",
"_____no_output_____"
],
[
"import test",
"_____no_output_____"
]
],
[
[
"See, it got the \"module\" from sys...",
"_____no_output_____"
]
],
[
[
"test",
"_____no_output_____"
],
[
"test()",
"_____no_output_____"
]
],
[
[
"#### Example 3a",
"_____no_output_____"
],
[
"In this example we look at a simplified view of how Python imports a module.",
"_____no_output_____"
],
[
"We use two built-in functions, `compile` and `exec`.",
"_____no_output_____"
],
[
"The `compile` function compiles source (e.g. text) into a code object.",
"_____no_output_____"
],
[
"The `exec` function is used to execute a code object. Optionally we can specify what dictionary should be used to store global symbols.\n\nIn our case we are going to want to use our module's `__dict__`.",
"_____no_output_____"
],
[
"#### Example 3b",
"_____no_output_____"
],
[
"This is essentially the same as example 3a, except we make our importer into a function and use it to show how we technically should look for a cached version of the module first.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a17b375922551ffd34fd84ec6e7c3bc5561be0a
| 567,387 |
ipynb
|
Jupyter Notebook
|
Traffic_Sign_Classifier.ipynb
|
drtupe/Traffic_Sign_Classifier
|
2ddf731f59fc2115c5e9cd2c730331aa6aff49d8
|
[
"MIT"
] | null | null | null |
Traffic_Sign_Classifier.ipynb
|
drtupe/Traffic_Sign_Classifier
|
2ddf731f59fc2115c5e9cd2c730331aa6aff49d8
|
[
"MIT"
] | null | null | null |
Traffic_Sign_Classifier.ipynb
|
drtupe/Traffic_Sign_Classifier
|
2ddf731f59fc2115c5e9cd2c730331aa6aff49d8
|
[
"MIT"
] | null | null | null | 375.504302 | 54,216 | 0.928763 |
[
[
[
"# Self-Driving Car Engineer Nanodegree\n\n## Deep Learning\n\n## Project: Build a Traffic Sign Recognition Classifier\n\nIn this notebook, a template is provided for you to implement your functionality in stages, which is required to successfully complete this project. If additional code is required that cannot be included in the notebook, be sure that the Python code is successfully imported and included in your submission if necessary. \n\n> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to \\n\",\n \"**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission. \n\nIn addition to implementing code, there is a writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) that can be used to guide the writing process. Completing the code template and writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/481/view) for this project.\n\nThe [rubric](https://review.udacity.com/#!/rubrics/481/view) contains \"Stand Out Suggestions\" for enhancing the project beyond the minimum requirements. The stand out suggestions are optional. If you decide to pursue the \"stand out suggestions\", you can include the code in this Ipython notebook and also discuss the results in the writeup file.\n\n\n>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.",
"_____no_output_____"
],
[
"---\n## Step 0: Load The Data",
"_____no_output_____"
]
],
[
[
"# Load pickled data\nimport pickle\n\n# TODO: Fill this in based on where you saved the training and testing data\n\ntraining_file = '/home/workspace/data/train.p'\nvalidation_file='/home/workspace/data/valid.p'\ntesting_file = '/home/workspace/data/test.p'\n\nwith open(training_file, mode='rb') as f:\n train = pickle.load(f)\nwith open(validation_file, mode='rb') as f:\n valid = pickle.load(f)\nwith open(testing_file, mode='rb') as f:\n test = pickle.load(f)\n \nX_train, y_train = train['features'], train['labels']\nX_valid, y_valid = valid['features'], valid['labels']\nX_test, y_test = test['features'], test['labels']",
"_____no_output_____"
]
],
[
[
"---\n\n## Step 1: Dataset Summary & Exploration\n\nThe pickled data is a dictionary with 4 key/value pairs:\n\n- `'features'` is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels).\n- `'labels'` is a 1D array containing the label/class id of the traffic sign. The file `signnames.csv` contains id -> name mappings for each id.\n- `'sizes'` is a list containing tuples, (width, height) representing the original width and height the image.\n- `'coords'` is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image. **THESE COORDINATES ASSUME THE ORIGINAL IMAGE. THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES**\n\nComplete the basic data summary below. Use python, numpy and/or pandas methods to calculate the data summary rather than hard coding the results. For example, the [pandas shape method](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.shape.html) might be useful for calculating some of the summary results. ",
"_____no_output_____"
],
[
"### Provide a Basic Summary of the Data Set Using Python, Numpy and/or Pandas",
"_____no_output_____"
]
],
[
[
"### Replace each question mark with the appropriate value. \n### Use python, pandas or numpy methods rather than hard coding the results\nimport numpy as np\n\n# TODO: Number of training examples\nn_train = X_train.shape[0]\n\n# TODO: Number of validation examples\n# n_validation = ?\n\n# TODO: Number of testing examples.\nn_test = X_test.shape[0]\n\n# TODO: What's the shape of an traffic sign image?\nimage_shape = X_train.shape[1:]\n\n# TODO: How many unique classes/labels there are in the dataset.\nn_classes = len(np.unique(y_train))\n\nprint(\"Number of training examples =\", n_train)\nprint(\"Number of testing examples =\", n_test)\nprint(\"Image data shape =\", image_shape)\nprint(\"Number of classes =\", n_classes)",
"Number of training examples = 34799\nNumber of testing examples = 12630\nImage data shape = (32, 32, 3)\nNumber of classes = 43\n"
]
],
[
[
"### Include an exploratory visualization of the dataset",
"_____no_output_____"
],
[
"Visualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include: plotting traffic sign images, plotting the count of each sign, etc. \n\nThe [Matplotlib](http://matplotlib.org/) [examples](http://matplotlib.org/examples/index.html) and [gallery](http://matplotlib.org/gallery.html) pages are a great resource for doing visualizations in Python.\n\n**NOTE:** It's recommended you start with something simple first. If you wish to do more, come back to it after you've completed the rest of the sections. It can be interesting to look at the distribution of classes in the training, validation and test set. Is the distribution the same? Are there more examples of some classes than others?",
"_____no_output_____"
]
],
[
[
"### Data exploration visualization code goes here.\n### Feel free to use as many code cells as needed.\nimport matplotlib.pyplot as plt\nimport random\nimport csv\n# Visualizations will be shown in the notebook.\n%matplotlib inline\n\ndef figures_plotting(figures, nrows = 1, ncols = 1, labels = None):\n fig, axs = plt.subplots(ncols, nrows, figsize = (12, 14))\n axs = axs.ravel()\n for index, title in zip(range(len(figures)), figures):\n axs[index].imshow(figures[title], plt.gray())\n if(labels != None):\n axs[index].set_title(labels[index])\n else:\n axs[index].set_title(title)\n \n axs[index].set_axis_off()\n \n plt.tight_layout()\n \nname_values = np.genfromtxt('signnames.csv', skip_header = 1, dtype = [('myint', 'i8'), ('mystring', 'S55')], delimiter = ',')\n\nnumber_of_images = 8\nfigures = {}\nlabels = {}\n\nfor i in range(number_of_images):\n index = random.randint(0, n_train -1)\n labels[i] = name_values[y_train[index]][1].decode('ascii')\n # print(name_values[y_train[index]][1].decode('ascii'))\n figures[i] = X_train[index]\n \nfigures_plotting(figures, 4, 2, labels)",
"_____no_output_____"
]
],
[
[
"#### Dataset Sign Counts check",
"_____no_output_____"
]
],
[
[
"## This code block is to check the distribution of dataset for training, validation and testing purpose.\n## From the results it seems that data is uniformly distributed for training, validation and test purposes.\n\nunique_train, counts_train = np.unique(y_train, return_counts=True)\nplt.bar(unique_train, counts_train)\nplt.grid()\nplt.title(\"Train Dataset Sign Counts\")\nplt.show()\n\nunique_test, counts_test = np.unique(y_test, return_counts=True)\nplt.bar(unique_test, counts_test)\nplt.grid()\nplt.title(\"Test Dataset Sign Counts\")\nplt.show()\n\nunique_valid, counts_valid = np.unique(y_valid, return_counts=True)\nplt.bar(unique_valid, counts_valid)\nplt.grid()\nplt.title(\"Valid Dataset Sign Counts\")\nplt.show()",
"_____no_output_____"
],
[
"plt.savefig('Train Dataset Sign Counts')\nplt.savefig('Test Dataset Sign Counts')\nplt.savefig('Valid Dataset Sign Counts')",
"_____no_output_____"
]
],
[
[
"----\n\n## Step 2: Design and Test a Model Architecture\n\nDesign and implement a deep learning model that learns to recognize traffic signs. Train and test your model on the [German Traffic Sign Dataset](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset).\n\nThe LeNet-5 implementation shown in the [classroom](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) at the end of the CNN lesson is a solid starting point. You'll have to change the number of classes and possibly the preprocessing, but aside from that it's plug and play! \n\nWith the LeNet-5 solution from the lecture, you should expect a validation set accuracy of about 0.89. To meet specifications, the validation set accuracy will need to be at least 0.93. It is possible to get an even higher accuracy, but 0.93 is the minimum for a successful project submission. \n\nThere are various aspects to consider when thinking about this problem:\n\n- Neural network architecture (is the network over or underfitting?)\n- Play around preprocessing techniques (normalization, rgb to grayscale, etc)\n- Number of examples per label (some have more than others).\n- Generate fake data.\n\nHere is an example of a [published baseline model on this problem](http://yann.lecun.com/exdb/publis/pdf/sermanet-ijcnn-11.pdf). It's not required to be familiar with the approach used in the paper but, it's good practice to try to read papers like these.",
"_____no_output_____"
],
[
"### Pre-process the Data Set (normalization, grayscale, etc.)",
"_____no_output_____"
],
[
"Minimally, the image data should be normalized so that the data has mean zero and equal variance. For image data, `(pixel - 128)/ 128` is a quick way to approximately normalize the data and can be used in this project. \n\nOther pre-processing steps are optional. You can try different techniques to see if it improves performance. \n\nUse the code cell (or multiple code cells, if necessary) to implement the first step of your project.",
"_____no_output_____"
]
],
[
[
"### Preprocess the data here. It is required to normalize the data. Other preprocessing steps could include \n### converting to grayscale, etc.\n### Feel free to use as many code cells as needed.\nimport tensorflow as tf\nfrom tensorflow.contrib.layers import flatten\nfrom math import ceil\nfrom sklearn.utils import shuffle",
"_____no_output_____"
],
[
"## Converting RGB image to grayscale image\n\nX_train_rgb = X_train\nX_train_gray = np.sum(X_train/3, axis=3, keepdims=True)\n\nX_test_rgb = X_test\nX_test_gray = np.sum(X_test/3, axis=3, keepdims=True)\n\nX_valid_rgb = X_valid\nX_valid_gray = np.sum(X_valid/3, axis=3, keepdims=True)\n\nprint(X_train_rgb.shape)\nprint(X_train_gray.shape)\n\nprint(X_test_rgb.shape)\nprint(X_test_gray.shape)\n\nprint(X_valid_rgb.shape)\nprint(X_valid_gray.shape)",
"(34799, 32, 32, 3)\n(34799, 32, 32, 1)\n(12630, 32, 32, 3)\n(12630, 32, 32, 1)\n(4410, 32, 32, 3)\n(4410, 32, 32, 1)\n"
],
[
"## As we have now converted all the rgb images to grayscale, here onwards we will be using this processed image to give it as an input to our network.\n\nX_train = X_train_gray\nX_test = X_test_gray\nX_valid = X_valid_gray",
"_____no_output_____"
],
[
"image_depth_channels = X_train.shape[3]\n\nprint(image_depth_channels)\n\nnumber_of_images = 8\nfigures = {}\nrandom_signs = []\n\nfor i in range(number_of_images):\n index = random.randint(0, n_train-1)\n labels[i] = name_values[y_train[index]][1].decode('ascii')\n figures[i] = X_train[index].squeeze()\n random_signs.append(index)\n\nprint(random_signs)\nfigures_plotting(figures, 4, 2, labels)",
"1\n[15918, 29400, 24586, 14649, 6659, 6797, 13657, 10236]\n"
],
[
"import cv2\n\nX_train_1 = []\ny_train_1 = []\n\nX_train_2 = []\ny_train_2 = []\n\nnew_counts_train = counts_train\nfor i in range(n_train):\n if(new_counts_train[y_train[i]] < 3000):\n for j in range(3):\n dx, dy = np.random.randint(-1.7, 1.8, 2)\n M = np.float32([[1,0,dx], [0, 1, dy]])\n dst = cv2.warpAffine(X_train[i], M, (X_train[i].shape[0], X_train[i].shape[1]))\n dst = dst[:,:, None]\n X_train_1.append(dst)\n y_train_1.append(y_train[i])\n \n random_higher_bound = random.randint(27, 32)\n random_lower_bound = random.randint(0, 5)\n \n points_one = np.float32([[0,0],[32,0],[0,32],[32,32]])\n points_two = np.float32([[0, 0], [random_higher_bound, random_lower_bound], [random_lower_bound, 32],[32, random_higher_bound]])\n \n M = cv2.getPerspectiveTransform(points_one, points_two)\n dst = cv2.warpPerspective(X_train[i], M, (32,32))\n \n X_train_2.append(dst)\n y_train_2.append(y_train[i])\n \n tilt = random.randint(-12, 12)\n M = cv2.getRotationMatrix2D((X_train[i].shape[0]/2, X_train[i].shape[1]/2), tilt, 1)\n dst = cv2.warpAffine(X_train[i], M, (X_train[i].shape[0], X_train[i].shape[1]))\n \n X_train_2.append(dst)\n y_train_2.append(y_train[i])\n \n new_counts_train[y_train[i]] += 2\n \nX_train_1 = np.array(X_train_1)\ny_train_1 = np.array(y_train_1)\nX_train = np.concatenate((X_train, X_train_1), axis=0)\ny_train = np.concatenate((y_train, y_train_1), axis=0)\n\nX_train_2 = np.array(X_train_2)\ny_train_2 = np.array(y_train_2)\nX_train_2 = np.reshape(X_train_2, (np.shape(X_train_2)[0], 32, 32, 1))\nX_train = np.concatenate((X_train, X_train_2), axis=0)\ny_train = np.concatenate((y_train, y_train_2), axis=0)\n\nX_train = np.concatenate((X_train, X_valid), axis=0)\ny_train = np.concatenate((y_train, y_valid), axis=0)",
"_____no_output_____"
],
[
"figures1 = {}\nlabels = {}\nfigures1[0] = X_train[n_train+1].squeeze()\nlabels[0] = y_train[n_train+1]\nfigures1[1] = X_train[0].squeeze()\nlabels[1] = y_train[0]\n\n\nfigures_plotting(figures1, 1, 2, labels)",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\n\nX_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size = 0.2, random_state=0)\n\nprint(\"Modified Dataset Size : {}\".format(X_train.shape[0]))\n\nunique, counts = np.unique(y_train, return_counts=True)\nplt.bar(unique, counts)\nplt.grid()\nplt.title(\"Modified Train Dataset Sign Counts\")\nplt.show()\n\nunique, counts = np.unique(y_test, return_counts=True)\nplt.bar(unique, counts)\nplt.grid()\nplt.title(\"Modified Test Dataset Sign Counts\")\nplt.show()\n\nunique, counts = np.unique(y_valid, return_counts=True)\nplt.bar(unique, counts)\nplt.grid()\nplt.title(\"Modified Valid Dataset Sign Counts\")\nplt.show()",
"Modified Dataset Size : 76227\n"
],
[
"plt.savefig('Train Dataset Sign Counts')\nplt.savefig('Test Dataset Sign Counts')\nplt.savefig('Valid Dataset Sign Counts')",
"_____no_output_____"
],
[
"def normalize(a):\n return -np.log(1/((1 + a)/257) - 1)\n\nX_train_normalized = X_train/127.5 - 1\nX_test_normalized = X_test/127.5 - 1\n\nstop = 8\nfigures = {}\ncount = 0\nfor i in random_signs:\n labels[count] = name_values[y_train[i]][1].decode('ascii')\n figures[count] = X_train_normalized[i].squeeze()\n count += 1;\n \nfigures_plotting(figures, 4, 2, labels)",
"_____no_output_____"
],
[
"X_train = X_train_normalized\nX_test = X_test_normalized",
"_____no_output_____"
]
],
[
[
"### Model Architecture",
"_____no_output_____"
]
],
[
[
"### Define your architecture here.\n### Feel free to use as many code cells as needed.\ndef conv2d(x, W, b, strides = 1):\n x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding = 'VALID')\n print(x.shape)\n return tf.nn.relu(x)\n\ndef LeNet(x):\n mu = 0\n sigma = 0.1\n \n W_1 = tf.Variable(tf.truncated_normal(shape = (5, 5, image_depth_channels, 6), mean = mu, stddev = sigma))\n b_1 = tf.Variable(tf.zeros(6))\n layer_1 = conv2d(x, W_1, b_1, 1)\n layer_1 = tf.nn.max_pool(layer_1, ksize=[1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'VALID')\n print(layer_1.shape)\n print()\n \n W_2 = tf.Variable(tf.truncated_normal(shape = (5, 5, 6, 16), mean = mu, stddev = sigma))\n b_2 = tf.Variable(tf.zeros(16))\n layer_2 = conv2d(layer_1, W_2, b_2, 1)\n layer_2 = tf.nn.max_pool(layer_2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID')\n print(layer_2.shape)\n print()\n \n W_2_a = tf.Variable(tf.truncated_normal(shape = (5, 5, 16, 412), mean = mu, stddev = sigma))\n b_2_a = tf.Variable(tf.zeros(412))\n layer_2_a = conv2d(layer_2, W_2_a, b_2_a, 1)\n print(layer_2_a.shape)\n print()\n \n flat = flatten(layer_2_a)\n W_3 = tf.Variable(tf.truncated_normal(shape = (412, 122), mean = mu, stddev = sigma))\n b_3 = tf.Variable(tf.zeros(122))\n layer_3 = tf.nn.relu(tf.nn.bias_add(tf.matmul(flat, W_3), b_3))\n layer_3 = tf.nn.dropout(layer_3, keep_prob)\n \n W_4 = tf.Variable(tf.truncated_normal(shape = (122, 84), mean = mu, stddev = sigma))\n b_4 = tf.Variable(tf.zeros(84))\n layer_4 = tf.nn.relu(tf.nn.bias_add(tf.matmul(layer_3, W_4), b_4))\n layer_4 = tf.nn.dropout(layer_4, keep_prob)\n \n W_5 = tf.Variable(tf.truncated_normal(shape=(84, 43), mean = mu, stddev = sigma))\n b_5 = tf.Variable(tf.zeros(43))\n layer_5 = tf.nn.bias_add(tf.matmul(layer_4, W_5), b_5)\n \n return layer_5\n\nx = tf.placeholder(tf.float32, (None, 32, 32, image_depth_channels))\ny = tf.placeholder(tf.int32, (None))\none_hot_y = tf.one_hot(y, 43)\nkeep_prob = tf.placeholder(tf.float32)\n ",
"_____no_output_____"
]
],
[
[
"### Train, Validate and Test the Model",
"_____no_output_____"
],
[
"A validation set can be used to assess how well the model is performing. A low accuracy on the training and validation\nsets imply underfitting. A high accuracy on the training set but low accuracy on the validation set implies overfitting.",
"_____no_output_____"
]
],
[
[
"EPOCHS = 45\nbatch_size = 120\n\nrate = 0.00097",
"_____no_output_____"
],
[
"logits = LeNet(x)\n\ncross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=one_hot_y)\nloss_operation = tf.reduce_mean(cross_entropy)\noptimizer = tf.train.AdamOptimizer(learning_rate = rate)\ntraining_operation = optimizer.minimize(loss_operation)",
"(?, 28, 28, 6)\n(?, 14, 14, 6)\n\n(?, 10, 10, 16)\n(?, 5, 5, 16)\n\n(?, 1, 1, 412)\n(?, 1, 1, 412)\n\n"
],
[
"### Evaluation model\n\ncorrect_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))\naccuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nsaver = tf.train.Saver()\n\ndef evaluate(X_data, y_data):\n num_examples = len(X_data)\n total_accuracy = 0\n sess = tf.get_default_session()\n for offset in range(0, num_examples, batch_size):\n batch_x, batch_y = X_data[offset:offset + batch_size], y_data[offset:offset + batch_size]\n accuracy = sess.run(accuracy_operation, feed_dict = {x: batch_x,\n y: batch_y, \n keep_prob: 1.0})\n total_accuracy += (accuracy * len(batch_x))\n \n return total_accuracy / num_examples",
"_____no_output_____"
],
[
"with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n num_examples = len(X_train)\n \n print(\"Training...\")\n print()\n validation_accuracy_figure = []\n test_accuracy_figure = []\n for i in range(EPOCHS):\n X_train, y_train = shuffle(X_train, y_train)\n for offset in range(0, num_examples, batch_size):\n end = offset + batch_size\n batch_x, batch_y = X_train[offset:end], y_train[offset:end]\n sess.run(training_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: 0.5})\n \n validation_accuracy = evaluate(X_valid, y_valid)\n validation_accuracy_figure.append(validation_accuracy)\n \n test_accuracy = evaluate(X_train, y_train)\n test_accuracy_figure.append(test_accuracy)\n print(\"EPOCH {} ...\".format(i+1))\n print(\"Test Accuracy = {:.3f}\".format(test_accuracy))\n print(\"Validation Accuracy = {:.3f}\".format(validation_accuracy))\n print()\n \n saver.save(sess, './lenet')\n print(\"Model saved\")",
"Training...\n\nEPOCH 1 ...\nTest Accuracy = 0.817\nValidation Accuracy = 0.652\n\nEPOCH 2 ...\nTest Accuracy = 0.933\nValidation Accuracy = 0.828\n\nEPOCH 3 ...\nTest Accuracy = 0.964\nValidation Accuracy = 0.875\n\nEPOCH 4 ...\nTest Accuracy = 0.974\nValidation Accuracy = 0.899\n\nEPOCH 5 ...\nTest Accuracy = 0.986\nValidation Accuracy = 0.931\n\nEPOCH 6 ...\nTest Accuracy = 0.987\nValidation Accuracy = 0.942\n\nEPOCH 7 ...\nTest Accuracy = 0.993\nValidation Accuracy = 0.949\n\nEPOCH 8 ...\nTest Accuracy = 0.994\nValidation Accuracy = 0.954\n\nEPOCH 9 ...\nTest Accuracy = 0.992\nValidation Accuracy = 0.946\n\nEPOCH 10 ...\nTest Accuracy = 0.995\nValidation Accuracy = 0.950\n\nEPOCH 11 ...\nTest Accuracy = 0.997\nValidation Accuracy = 0.957\n\nEPOCH 12 ...\nTest Accuracy = 0.998\nValidation Accuracy = 0.971\n\nEPOCH 13 ...\nTest Accuracy = 0.997\nValidation Accuracy = 0.950\n\nEPOCH 14 ...\nTest Accuracy = 0.998\nValidation Accuracy = 0.968\n\nEPOCH 15 ...\nTest Accuracy = 0.998\nValidation Accuracy = 0.975\n\nEPOCH 16 ...\nTest Accuracy = 0.999\nValidation Accuracy = 0.963\n\nEPOCH 17 ...\nTest Accuracy = 0.997\nValidation Accuracy = 0.981\n\nEPOCH 18 ...\nTest Accuracy = 0.999\nValidation Accuracy = 0.978\n\nEPOCH 19 ...\nTest Accuracy = 0.998\nValidation Accuracy = 0.975\n\nEPOCH 20 ...\nTest Accuracy = 0.999\nValidation Accuracy = 0.983\n\nEPOCH 21 ...\nTest Accuracy = 0.999\nValidation Accuracy = 0.971\n\nEPOCH 22 ...\nTest Accuracy = 0.999\nValidation Accuracy = 0.987\n\nEPOCH 23 ...\nTest Accuracy = 0.999\nValidation Accuracy = 0.983\n\nEPOCH 24 ...\nTest Accuracy = 0.999\nValidation Accuracy = 0.958\n\nEPOCH 25 ...\nTest Accuracy = 0.999\nValidation Accuracy = 0.978\n\nEPOCH 26 ...\nTest Accuracy = 0.999\nValidation Accuracy = 0.979\n\nEPOCH 27 ...\nTest Accuracy = 0.998\nValidation Accuracy = 0.977\n\nEPOCH 28 ...\nTest Accuracy = 0.998\nValidation Accuracy = 0.969\n\nEPOCH 29 ...\nTest Accuracy = 0.999\nValidation Accuracy = 0.978\n\nEPOCH 30 ...\nTest Accuracy = 0.999\nValidation Accuracy = 0.979\n\nEPOCH 31 ...\nTest Accuracy = 0.998\nValidation Accuracy = 0.974\n\nEPOCH 32 ...\nTest Accuracy = 1.000\nValidation Accuracy = 0.978\n\nEPOCH 33 ...\nTest Accuracy = 0.999\nValidation Accuracy = 0.980\n\nEPOCH 34 ...\nTest Accuracy = 1.000\nValidation Accuracy = 0.982\n\nEPOCH 35 ...\nTest Accuracy = 1.000\nValidation Accuracy = 0.988\n\nEPOCH 36 ...\nTest Accuracy = 0.998\nValidation Accuracy = 0.975\n\nEPOCH 37 ...\nTest Accuracy = 0.996\nValidation Accuracy = 0.921\n\nEPOCH 38 ...\nTest Accuracy = 1.000\nValidation Accuracy = 0.985\n\nEPOCH 39 ...\nTest Accuracy = 1.000\nValidation Accuracy = 0.975\n\nEPOCH 40 ...\nTest Accuracy = 0.999\nValidation Accuracy = 0.981\n\nEPOCH 41 ...\nTest Accuracy = 1.000\nValidation Accuracy = 0.987\n\nEPOCH 42 ...\nTest Accuracy = 1.000\nValidation Accuracy = 0.985\n\nEPOCH 43 ...\nTest Accuracy = 1.000\nValidation Accuracy = 0.976\n\nEPOCH 44 ...\nTest Accuracy = 1.000\nValidation Accuracy = 0.973\n\nEPOCH 45 ...\nTest Accuracy = 0.998\nValidation Accuracy = 0.980\n\nModel saved\n"
],
[
"plt.plot(test_accuracy_figure, label = 'test')\n#plt.title(\"Test Accuracy\")\n#plt.show()\n\nplt.plot(validation_accuracy_figure, label = 'validation')\nplt.title(\"Validation Accuracy\")\nplt.legend()\nplt.show()",
"_____no_output_____"
],
[
"with tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint('.'))\n\n train_accuracy = evaluate(X_train, y_train)\n print(\"Train Accuracy = {:.3f}\".format(train_accuracy))\n \n valid_accuracy = evaluate(X_valid, y_valid)\n print(\"Valid Accuracy = {:.3f}\".format(valid_accuracy)) \n \n test_accuracy = evaluate(X_test, y_test)\n print(\"Test Accuracy = {:.3f}\".format(test_accuracy))",
"INFO:tensorflow:Restoring parameters from ./lenet\nTrain Accuracy = 0.998\nValid Accuracy = 0.980\nTest Accuracy = 0.939\n"
]
],
[
[
"---\n\n## Step 3: Test a Model on New Images\n\nTo give yourself more insight into how your model is working, download at least five pictures of German traffic signs from the web and use your model to predict the traffic sign type.\n\nYou may find `signnames.csv` useful as it contains mappings from the class id (integer) to the actual sign name.",
"_____no_output_____"
],
[
"### Load and Output the Images",
"_____no_output_____"
]
],
[
[
"### Load the images and plot them here.\n### Feel free to use as many code cells as needed.\nimport glob\nimport cv2\nall_images = sorted(glob.glob('./test_images/*.png'))\nall_labels = np.array([1, 22, 35, 15, 37, 18])\n\nfigures = {}\nlabels = {}\nsigns = []\nindex = 0\nfor image in all_images:\n img = cv2.cvtColor(cv2.imread(image), cv2.COLOR_BGR2RGB)\n signs.append(img)\n figures[index] = img\n labels[index] = name_values[all_labels[index]][1].decode('ascii')\n index += 1\n \nfigures_plotting(figures, 3, 2, labels)",
"_____no_output_____"
],
[
"signs = np.array(signs)\ngray_signs = np.sum(signs/3, axis = 3, keepdims=True)\nnormalized_signs = gray_signs/127.5-1\n\nnumber_of_images = 6\nfigures = {}\nlabels = {}\nfor i in range(number_of_images):\n labels[i] = name_values[all_labels[i]][1].decode('ascii')\n figures[i] = gray_signs[i].squeeze()\n \nfigures_plotting(figures, 3, 2, labels)",
"_____no_output_____"
]
],
[
[
"### Predict the Sign Type for Each Image",
"_____no_output_____"
]
],
[
[
"### Run the predictions here and use the model to output the prediction for each image.\n### Make sure to pre-process the images with the same pre-processing pipeline used earlier.\n### Feel free to use as many code cells as needed.\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver = tf.train.import_meta_graph('./lenet.meta')\n saver.restore(sess, \"./lenet\")\n my_accuracy = evaluate(normalized_signs, all_labels)\n print(\"My Data Set Accuracy = {:.3f}\".format(my_accuracy))",
"INFO:tensorflow:Restoring parameters from ./lenet\nMy Data Set Accuracy = 0.833\n"
]
],
[
[
"### Analyze Performance",
"_____no_output_____"
]
],
[
[
"### Calculate the accuracy for these 5 new images. \n### For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate on these new images.\nmy_single_item_array = []\nmy_single_item_label_array = []\n\nfor i in range(6):\n my_single_item_array.append(normalized_signs[i])\n my_single_item_label_array.append(all_labels[i])\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n# saver = tf.train.import_meta_graph('./lenet.meta')\n saver.restore(sess, \"./lenet\")\n my_accuracy = evaluate(my_single_item_array, my_single_item_label_array)\n print('Image {}'.format(i+1))\n print(\"Image Accuracy = {:.3f}\".format(my_accuracy))\n print()",
"INFO:tensorflow:Restoring parameters from ./lenet\nImage 1\nImage Accuracy = 1.000\n\nINFO:tensorflow:Restoring parameters from ./lenet\nImage 2\nImage Accuracy = 1.000\n\nINFO:tensorflow:Restoring parameters from ./lenet\nImage 3\nImage Accuracy = 1.000\n\nINFO:tensorflow:Restoring parameters from ./lenet\nImage 4\nImage Accuracy = 0.750\n\nINFO:tensorflow:Restoring parameters from ./lenet\nImage 5\nImage Accuracy = 0.800\n\nINFO:tensorflow:Restoring parameters from ./lenet\nImage 6\nImage Accuracy = 0.833\n\n"
]
],
[
[
"### Output Top 5 Softmax Probabilities For Each Image Found on the Web",
"_____no_output_____"
],
[
"For each of the new images, print out the model's softmax probabilities to show the **certainty** of the model's predictions (limit the output to the top 5 probabilities for each image). [`tf.nn.top_k`](https://www.tensorflow.org/versions/r0.12/api_docs/python/nn.html#top_k) could prove helpful here. \n\nThe example below demonstrates how tf.nn.top_k can be used to find the top k predictions for each image.\n\n`tf.nn.top_k` will return the values and indices (class ids) of the top k predictions. So if k=3, for each sign, it'll return the 3 largest probabilities (out of a possible 43) and the correspoding class ids.\n\nTake this numpy array as an example. The values in the array represent predictions. The array contains softmax probabilities for five candidate images with six possible classes. `tf.nn.top_k` is used to choose the three classes with the highest probability:\n\n```\n# (5, 6) array\na = np.array([[ 0.24879643, 0.07032244, 0.12641572, 0.34763842, 0.07893497,\n 0.12789202],\n [ 0.28086119, 0.27569815, 0.08594638, 0.0178669 , 0.18063401,\n 0.15899337],\n [ 0.26076848, 0.23664738, 0.08020603, 0.07001922, 0.1134371 ,\n 0.23892179],\n [ 0.11943333, 0.29198961, 0.02605103, 0.26234032, 0.1351348 ,\n 0.16505091],\n [ 0.09561176, 0.34396535, 0.0643941 , 0.16240774, 0.24206137,\n 0.09155967]])\n```\n\nRunning it through `sess.run(tf.nn.top_k(tf.constant(a), k=3))` produces:\n\n```\nTopKV2(values=array([[ 0.34763842, 0.24879643, 0.12789202],\n [ 0.28086119, 0.27569815, 0.18063401],\n [ 0.26076848, 0.23892179, 0.23664738],\n [ 0.29198961, 0.26234032, 0.16505091],\n [ 0.34396535, 0.24206137, 0.16240774]]), indices=array([[3, 0, 5],\n [0, 1, 4],\n [0, 5, 1],\n [1, 3, 5],\n [1, 4, 3]], dtype=int32))\n```\n\nLooking just at the first row we get `[ 0.34763842, 0.24879643, 0.12789202]`, you can confirm these are the 3 largest probabilities in `a`. You'll also notice `[3, 0, 5]` are the corresponding indices.",
"_____no_output_____"
]
],
[
[
"### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web. \n### Feel free to use as many code cells as needed.\n\nk_size = 5\nsoftmax_logits = tf.nn.softmax(logits)\ntop_k = tf.nn.top_k(softmax_logits, k=k_size)\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver.restore(sess, \"./lenet\")\n my_softmax_logits = sess.run(softmax_logits, feed_dict={x: normalized_signs, keep_prob: 1.0})\n my_top_k = sess.run(top_k, feed_dict={x: normalized_signs, keep_prob: 1.0})\n\n for i in range(6):\n figures = {}\n labels = {}\n \n figures[0] = signs[i]\n labels[0] = \"Original\"\n \n for j in range(k_size):\n labels[j+1] = 'Guess {} : ({:.0f}%)'.format(j+1, 100*my_top_k[0][i][j])\n figures[j+1] = X_valid[np.argwhere(y_valid == my_top_k[1][i][j])[0]].squeeze()\n \n figures_plotting(figures, 1, 6, labels)",
"INFO:tensorflow:Restoring parameters from ./lenet\n"
]
],
[
[
"### Project Writeup\n\nOnce you have completed the code implementation, document your results in a project writeup using this [template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) as a guide. The writeup can be in a markdown or pdf file. ",
"_____no_output_____"
],
[
"> **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to \\n\",\n \"**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4a17b3b1fe94ee59e63a9d2a10e421816a2905d1
| 106,839 |
ipynb
|
Jupyter Notebook
|
newstools/goodarticle/.ipynb_checkpoints/predictor-checkpoint.ipynb
|
hayj/NewsTools
|
847bf5ea2f33273c6158a2aa0a962beb5667d431
|
[
"MIT"
] | 1 |
2018-06-18T13:03:50.000Z
|
2018-06-18T13:03:50.000Z
|
newstools/goodarticle/.ipynb_checkpoints/predictor-checkpoint.ipynb
|
hayj/NewsTools
|
847bf5ea2f33273c6158a2aa0a962beb5667d431
|
[
"MIT"
] | null | null | null |
newstools/goodarticle/.ipynb_checkpoints/predictor-checkpoint.ipynb
|
hayj/NewsTools
|
847bf5ea2f33273c6158a2aa0a962beb5667d431
|
[
"MIT"
] | null | null | null | 57.31706 | 2,347 | 0.608532 |
[
[
[
"from systemtools.hayj import *\nfrom systemtools.basics import *\nfrom systemtools.file import *\nfrom systemtools.printer import *\nfrom systemtools.logger import *\nfrom annotator.annot import *\nfrom datatools.jsonutils import *\nfrom nlptools.tokenizer import *\nfrom datatools.htmltools import *\nfrom newssource.goodarticle.utils import *\nimport numpy as np\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import SVR, LinearSVC\nfrom sklearn import linear_model\nfrom sklearn.model_selection import StratifiedKFold",
"_____no_output_____"
],
[
"data = []\nfor file in sortedGlob(\"goodarticle*.json\"):\n data += fromJsonFile(file)",
"_____no_output_____"
],
[
"stopwords = set(fileToStrList(\"stopwords.txt\"))\nstartswithExcludes = set(fileToStrList(\"startswith-excludes.txt\"))",
"_____no_output_____"
],
[
"newData = []\nfor i in range(len(data)):\n data[i][\"text\"] = newsPreclean(data[i][\"text\"], startswithExcludes=startswithExcludes)\n if len(data[i][\"text\"]) > 0:\n newData.append(data[i])\ndata = newData",
"_____no_output_____"
],
[
"bp(data, 2)\nprint(len(data))",
"[\n {\n authors: [ Daniel Marans, Reporter ],\n dataset: twinews,\n domain: huffingtonpost.com,\n relevance: 1.0,\n text: In a Saturday blogpost, Democratic National Committee chairman Tom Perez vowed to enact reforms to t,\n title: DNC Chairman Promises To Reform Party's Presidential Nominating Process | HuffPost,\n url: https://www.huffingtonpost.com/entry/tom-perez-dnc-chair-reform-presidential-nominating-process_us_5\n },\n {\n authors: [ Dirk Verdoorn ],\n dataset: twinews,\n domain: kcra.com,\n relevance: 0.0,\n text: Hearst Television participates in various affiliate marketing programs, which means we may get paid ,\n title: Sacramento and Northern California Weather Updates – KCRA 3 News,\n url: https://www.kcra.com/weather?platform=hootsuite\n },\n ...,\n {\n authors: None,\n dataset: newsandblog,\n domain: nypost.com,\n relevance: 0.6,\n text: MORE ON: Tiler Peck wants you to know there's more to being a ballerina than standing on your toes. ,\n title: NYC Ballet's Tiler Peck pirouettes into a new Web series | New York Post,\n url: http://nypost.com/2013/11/01/nyc-ballets-tiler-peck-pirouettes-into-a-new-web-series/\n },\n {\n authors: None,\n dataset: newsandblog,\n domain: indiatimes.com,\n relevance: 1.0,\n text: NEW DELHI: The S&P BSE Sensex started on a positive note but quickly pared early morning gains and t,\n title: Sensex turns choppy after positive start; top ten stocks in focus - The Economic Times,\n url: http://economictimes.indiatimes.com/markets/stocks/stocks-in-news/sensex-turns-choppy-after-positive\n }\n]\n825\n"
],
[
"def basicFeatures\\\n(\n text,\n longLine=140,\n shortLine=20,\n tooLongDocument=60000,\n stopwords={},\n punct={',', ')', '...', \"'\", ';', '-', '!', ':', '?', '\"', '.', '('},\n logger=None,\n verbose=True,\n asDict=False,\n asNpArray=True,\n):\n # Checking vars:\n if stopwords is None or len(stopwords) == 0 or punct is None or len(punct) == 0:\n logWarning(\"Please give a stopwords list and a punct list\", logger, verbose=verbose)\n features = OrderedDict()\n # Too long document ?\n features[\"tooLongDocument\"] = len(text) >= tooLongDocument\n # Len of the text:\n features[\"length\"] = len(text)\n # The count of non-blank lines:\n lines = [e for e in text.split(\"\\n\") if e != '']\n features[\"linesCount\"] = len(lines)\n # The count of tokens:\n loweredText = text.lower()\n tokens = [e for e in text.split() if e != '']\n loweredTokens = [e for e in loweredText.split() if e != '']\n features[\"tokensCount\"] = len(tokens)\n # Count of long lines, mean lines length, count of short lines:\n longLinesCount = 0\n shortLinesCount = 0\n meanLinesLength = 0\n for line in lines:\n if len(line) >= longLine:\n longLinesCount += 1\n if len(line) <= shortLine:\n shortLinesCount += 1\n meanLinesLength += len(line)\n meanLinesLength = meanLinesLength / len(lines)\n features[\"longLinesCount\"] = longLinesCount\n features[\"shortLinesCount\"] = shortLinesCount\n features[\"meanLinesLength\"] = meanLinesLength\n features[\"longLinesRatio\"] = longLinesCount / len(lines)\n features[\"shortLinesRatio\"] = shortLinesCount / len(lines)\n # The ratio of stopwords / punct:\n stopwordsAndPunct = stopwords.union(punct)\n c = len([e for e in loweredTokens if e in stopwordsAndPunct])\n features[\"stopwordsPunctRatio\"] = c / len(loweredTokens)\n # The mean overlap:\n nonSWPTokens = [e for e in loweredTokens if e not in stopwordsAndPunct]\n c = dict()\n for token in nonSWPTokens:\n if token not in c:\n c[token] = 0\n c[token] += 1\n theMean = 0\n for token, count in c.items():\n theMean += count\n theMean = theMean / len(c)\n features[\"nonSWPMeanOverlap\"] = theMean\n # Ratio of only uppercased words:\n upperWordCount = len([e for e in tokens if hasLetter(e) and not hasLowerLetter(e)])\n features[\"upperWordCount\"] = upperWordCount\n features[\"upperWordRatio\"] = upperWordCount / len(tokens)\n # Ratio of non words:\n nonWordCount = len([e for e in tokens if not hasLetter(e)])\n features[\"nonWordCount\"] = nonWordCount\n features[\"nonWordRatio\"] = nonWordCount / len(tokens)\n # Ratio of html:\n htmlCharCount = len(text) - len(html2Text(text))\n if htmlCharCount < 0:\n htmlCharCount = 0\n features[\"htmlCharCount\"] = htmlCharCount\n features[\"htmlCharRatio\"] = htmlCharCount / len(text)\n # Ratio of words that has at least on upper case:\n c = 0\n for token in tokens:\n if hasUpperLetter(token):\n c += 1\n features[\"hasUpperRatio\"] = c / len(tokens)\n # Ratio of lines that start with a non word:\n c = 0\n for line in lines:\n line = line.split()\n if len(line) > 0:\n if not hasLetter(line[0]):\n c += 1\n features[\"lineStartWithNonWordRatio\"] = c / len(lines)\n # Encoding prob count:\n encCount = 0\n encCount += text.count(\"â\")\n encCount += text.count(\"ï\")\n encCount += text.count(\"U+\")\n encCount += text.count(\"Ï\")\n encCount += text.count(\"À\")\n encCount += text.count(\"Á\")\n encCount += text.count(\"Ã\")\n encCount += text.count(\"�\")\n encCount += text.count(\"\")\n features[\"encodingProbCount\"] = encCount\n # Finally we return all features:\n if asDict:\n return features\n 
else:\n result = list(features.values())\n if asNpArray:\n return np.array(result)\n else:\n return result",
"_____no_output_____"
],
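[
"# Editorial sketch: basicFeatures above relies on helpers (hasLetter, hasLowerLetter,\n# hasUpperLetter, html2Text, logWarning) that are not defined in the cells shown here and\n# presumably live in the author's utility library. The definitions below are minimal\n# stand-ins -- assumptions, not the originals -- so the later calls to basicFeatures run\n# even without that library. OrderedDict and np are re-imported in case they were not\n# already imported earlier in the notebook.\nimport re\nimport numpy as np\nfrom collections import OrderedDict\n\ndef hasLetter(s):\n    # True if the string contains at least one alphabetic character:\n    return any(c.isalpha() for c in s)\n\ndef hasLowerLetter(s):\n    # True if the string contains at least one lowercase letter:\n    return any(c.islower() for c in s)\n\ndef hasUpperLetter(s):\n    # True if the string contains at least one uppercase letter:\n    return any(c.isupper() for c in s)\n\ndef html2Text(s):\n    # Crude tag stripper; the original presumably uses a real HTML-to-text converter:\n    return re.sub(r'<[^>]+>', '', s)\n\ndef logWarning(message, logger=None, verbose=True):\n    # Print-based fallback for the author's logging helper:\n    if verbose:\n        print('WARNING: ' + message)",
"_____no_output_____"
],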
[
"def accuracy(predictions, y, thresholds=[0.25, 0.75]):\n assert len(predictions) == len(y)\n wellClassified = 0\n for i in range(len(y)):\n prediction = predictions[i]\n currentPredictedClass = continuous2discret(prediction, thresholds)\n currentY = y[i]\n currentClass = continuous2discret(currentY, thresholds)\n if currentPredictedClass == currentClass:\n wellClassified += 1\n return wellClassified / len(y)\ndef continuous2discret(y, thresholds):\n currentClass = 0\n for threshold in thresholds:\n if y <= threshold:\n return currentClass\n currentClass += 1\n return currentClass",
"_____no_output_____"
],
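[
"# Quick sanity check of the two helpers above (illustrative, not from the original run):\n# continuous2discret maps a score to a class index via the threshold list, and accuracy\n# compares discretized predictions with discretized gold scores.\nassert continuous2discret(0.1, [0.25, 0.75]) == 0   # below first threshold -> class 0\nassert continuous2discret(0.5, [0.25, 0.75]) == 1   # between thresholds    -> class 1\nassert continuous2discret(0.9, [0.25, 0.75]) == 2   # above last threshold  -> class 2\nprint(accuracy([0.1, 0.9, 0.5], [0.2, 0.8, 0.9]))   # 2 of 3 agree -> 0.666...",
"_____no_output_____"
],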
[
"for i, current in enumerate(data):\n if current[\"relevance\"] == 0.0:\n text = current[\"text\"]\n text = newsPreclean(text, startswithExcludes=startswithExcludes)\n bp(basicFeatures(text, stopwords=stopwords, asDict=True), 5)\n print()\n print(text)\n print()\n print()\n print()\n if i >= 3:\n break",
"{ 'encodingProbCount': 0, 'hasUpperRatio': 0.1875, 'htmlCharCount': 0, 'htmlCharRatio': 0.0, 'length': 218, 'lineStartWithNonWordRatio': 0.5, 'linesCount': 2, 'longLinesCount': 1, 'longLinesRatio': 0.5, 'meanLinesLength': 108.5, 'nonSWPMeanOverlap': 1.1111111111111112, 'nonWordCount': 1, 'nonWordRatio': 0.03125, 'shortLinesCount': 0, 'shortLinesRatio': 0.0, 'stopwordsPunctRatio': 0.375, 'tokensCount': 32, 'tooLongDocument': False, 'upperWordCount': 1, 'upperWordRatio': 0.03125 }\n\nHearst Television participates in various affiliate marketing programs, which means we may get paid commissions on purchases made through our links to retailer sites.\n©2018, Hearst Television Inc. on behalf of KCRA-TV.\n\n\n\n{ 'encodingProbCount': 0, 'hasUpperRatio': 0.22727272727272727, 'htmlCharCount': 0, 'htmlCharRatio': 0.0, 'length': 277, 'lineStartWithNonWordRatio': 0.0, 'linesCount': 1, 'longLinesCount': 1, 'longLinesRatio': 1.0, 'meanLinesLength': 277.0, 'nonSWPMeanOverlap': 1.1904761904761905, 'nonWordCount': 0, 'nonWordRatio': 0.0, 'shortLinesCount': 0, 'shortLinesRatio': 0.0, 'stopwordsPunctRatio': 0.4318181818181818, 'tokensCount': 44, 'tooLongDocument': False, 'upperWordCount': 0, 'upperWordRatio': 0.0 }\n\nPresident Trump’s reliance on Twitter for insult and mockery is well known. But the list of things that he has publicly praised is just as revealing. Compliments since Mr. Trump became president are highlighted. (This list covers tweets since Mr. Trump declared his candidacy.)\n\n\n\n"
],
[
"X = np.array([basicFeatures(current[\"text\"], stopwords=stopwords) for current in data])",
"_____no_output_____"
],
[
"y = np.array([continuous2discret(current[\"relevance\"], [0.51]) for current in data])",
"_____no_output_____"
],
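[
"# Class balance check (added for illustration; not in the original run): with a single\n# threshold at 0.51, y is binary -- 0 = low relevance, 1 = high relevance.\nimport numpy as np  # presumably already imported; re-importing is harmless\nprint(np.bincount(y))  # counts of class 0 and class 1",
"_____no_output_____"
],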
[
"bp(X)\nbp(y)\nprint(len(y))",
"[[0.00000000e+00 2.59600000e+03 1.00000000e+01 ... 1.63727960e-01\n 0.00000000e+00 0.00000000e+00]\n [0.00000000e+00 2.18000000e+02 2.00000000e+00 ... 1.87500000e-01\n 5.00000000e-01 0.00000000e+00]\n [0.00000000e+00 7.22300000e+03 2.90000000e+01 ... 1.85550082e-01\n 0.00000000e+00 0.00000000e+00]\n ...\n [0.00000000e+00 2.02300000e+03 1.00000000e+00 ... 2.37942122e-01\n 0.00000000e+00 0.00000000e+00]\n [0.00000000e+00 3.23400000e+03 1.00000000e+00 ... 1.96864111e-01\n 0.00000000e+00 7.00000000e+00]\n [0.00000000e+00 2.97100000e+03 1.00000000e+00 ... 1.69201521e-01\n 0.00000000e+00 0.00000000e+00]]\n[1 0 1 1 1 1 0 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 0 1 1 1 0\n 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 0 1 1 1 1 0\n 0 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1\n 1 1 0 0 1 1 1 1 1 1 1 0 1 1 0 1 0 0 1 1 1 1 1 0 1 1 1 1 1 1 1 0 1 0 1 1 1\n 1 0 1 1 0 0 1 1 1 0 0 1 1 0 1 1 1 1 1 1 0 1 0 1 1 1 1 1 1 1 0 1 1 0 1 1 1\n 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 0 1 0 1 1 1 1 1 0 0 1 0 1 1 0 1 1 1 1 0 0 0 1 0 1 0 0 1 1\n 0 1 1 0 1 1 0 1 1 0 0 1 1 1 1 0 0 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 0 1\n 1 1 0 0 1 1 0 1 0 1 1 1 1 1 1 1 0 0 0 1 0 0 1 1 0 0 0 1 0 0 0 1 1 0 0 1 0\n 1 1 1 1 1 1 1 0 0 0 1 0 0 0 0 0 0 0 0 1 1 0 0 0 1 1 0 1 0 1 1 1 1 1 0 1 1\n 0 0 0 0 0 1 0 1 1 1 0 0 0 1 0 1 0 0 1 1 0 1 1 1 1 1 1 1 1 0 0 0 0 0 1 1 0\n 1 1 1 0 1 1 0 0 0 1 0 0 0 0 0 0 0 1 1 0 1 0 0 1 1 1 0 0 1 0 1 0 1 1 0 0 1\n 1 0 0 0 1 0 0 1 1 1 1 1 1 1 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0\n 1 0 0 0 0 0 0 0 0 1 1 0 1 0 1 1 0 0 0 0 0 0 1 1 1 1 1 1 0 1 1 0 0 1 0 0 0\n 1 1 1 1 1 1 0 0 1 0 1 1 0 0 0 1 0 1 1 1 0 1 1 0 0 0 0 1 1 1 1 1 0 1 1 0 0\n 1 1 0 1 1 1 1 1 0 1 1 1 1 1 0 1 1 0 0 0 1 1 1 1 1 1 1 1 1 1 0 1 1 1 0 1 1\n 1 1 0 0 1 0 1 1 1 1 0 0 1 0 0 1 0 1 1 1 1 0 1 1 1 1 1 1 1 0 1 0 0 1 1 1 0\n 0 0 0 0 0 0 1 0 1 1 1 0 0 0 0 0 0 1 1 1 1 1 0 0 0 1 1 1 0 1 0 1 1 1 0 1 0\n 1 0 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 1 1 1 0 0 0\n 0 0 0 0 1 0 0 1 1 0 0 0 0 0 0 0 0 1 0 0 0 1 0 1 0 0 1 0 0 1 0 0 0 0 1 1 0\n 1 0 0 0 0 1 1 1 1 1 0 0 0 1 1 1 0 0 0 1 0 1 0 1 1 1 1 1 0 1 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 1 1 1]\n825\n"
],
[
"if False:\n from sklearn.metrics import make_scorer\n scorer = make_scorer(accuracy, greater_is_better=True) # scorer(clf, X, y)",
"_____no_output_____"
],
[
"param_grid = \\\n{\n 'loss': ['squared_hinge'], # 'hinge'\n 'penalty': ['l1', 'l2',], # l1, l2\n 'C': list(range(0, 20 + 1)),\n 'multi_class': ['ovr'], # , 'crammer_singer'\n 'dual': [False, True],\n 'random_state': [0],\n}",
"_____no_output_____"
],
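[
"# Size of the search space (illustrative check, not in the original run): 1 loss x 2\n# penalties x 21 C values x 1 multi_class x 2 dual settings = 84 candidates. The C=0\n# candidates are invalid for LinearSVC, which is why the results below show 0.000 rows\n# (they fall back to error_score=0.0).\nfrom sklearn.model_selection import ParameterGrid\nprint(len(ParameterGrid(param_grid)))  # 84\n\n# cpuCount() used below is an external helper; presumably it wraps the standard library's\n# multiprocessing.cpu_count (an assumption, defined here so the next cell runs standalone):\nfrom multiprocessing import cpu_count as cpuCount",
"_____no_output_____"
],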
[
"clf = GridSearchCV(LinearSVC(), param_grid=param_grid, scoring='accuracy',\n cv=StratifiedKFold(n_splits=5, random_state=0, shuffle=True), n_jobs=cpuCount(),\n error_score=0.0)",
"_____no_output_____"
],
[
"clf.fit(X, y)",
"_____no_output_____"
],
[
"print(\"Best parameters set found on development set:\")\nprint()\nprint(clf.best_params_)\nprint(clf.best_score_)\nprint()\nprint(\"Grid scores on development set:\")\nprint()\nmeans = clf.cv_results_['mean_test_score']\nstds = clf.cv_results_['std_test_score']\nfor mean, std, params in zip(means, stds, clf.cv_results_['params']):\n print(\"%0.3f (+/-%0.03f) for %r\"\n % (mean, std * 2, params))",
"Best parameters set found on development set:\n\n{'C': 6, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.9236363636363636\n\nGrid scores on development set:\n\n0.000 (+/-0.000) for {'C': 0, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.000 (+/-0.000) for {'C': 0, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.000 (+/-0.000) for {'C': 0, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.000 (+/-0.000) for {'C': 0, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.899 (+/-0.041) for {'C': 1, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.916 (+/-0.054) for {'C': 1, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.000 (+/-0.000) for {'C': 1, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.813 (+/-0.202) for {'C': 1, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.912 (+/-0.039) for {'C': 2, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.921 (+/-0.050) for {'C': 2, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.000 (+/-0.000) for {'C': 2, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.898 (+/-0.033) for {'C': 2, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.914 (+/-0.033) for {'C': 3, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.918 (+/-0.052) for {'C': 3, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.000 (+/-0.000) for {'C': 3, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.842 (+/-0.178) for {'C': 3, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.915 (+/-0.036) for {'C': 4, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.914 (+/-0.049) for {'C': 4, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.000 (+/-0.000) for {'C': 4, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.842 (+/-0.165) for {'C': 4, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.915 (+/-0.041) for {'C': 5, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.919 (+/-0.051) for {'C': 5, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.000 (+/-0.000) for {'C': 5, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.839 (+/-0.184) for {'C': 5, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.915 (+/-0.041) for {'C': 6, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.924 (+/-0.055) for {'C': 6, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 
0}\n0.000 (+/-0.000) for {'C': 6, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.893 (+/-0.105) for {'C': 6, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.915 (+/-0.041) for {'C': 7, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.918 (+/-0.044) for {'C': 7, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.000 (+/-0.000) for {'C': 7, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.918 (+/-0.049) for {'C': 7, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.915 (+/-0.041) for {'C': 8, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.918 (+/-0.051) for {'C': 8, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.000 (+/-0.000) for {'C': 8, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.916 (+/-0.047) for {'C': 8, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.916 (+/-0.037) for {'C': 9, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.920 (+/-0.047) for {'C': 9, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.000 (+/-0.000) for {'C': 9, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.916 (+/-0.047) for {'C': 9, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.916 (+/-0.037) for {'C': 10, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.921 (+/-0.050) for {'C': 10, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.000 (+/-0.000) for {'C': 10, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.916 (+/-0.047) for {'C': 10, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.916 (+/-0.037) for {'C': 11, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.921 (+/-0.050) for {'C': 11, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.000 (+/-0.000) for {'C': 11, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.902 (+/-0.054) for {'C': 11, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.916 (+/-0.037) for {'C': 12, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.922 (+/-0.055) for {'C': 12, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.000 (+/-0.000) for {'C': 12, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.902 (+/-0.054) for {'C': 12, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.916 (+/-0.037) for {'C': 13, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.919 (+/-0.056) for {'C': 13, 'dual': False, 'loss': 
'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.000 (+/-0.000) for {'C': 13, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.902 (+/-0.054) for {'C': 13, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.916 (+/-0.037) for {'C': 14, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.915 (+/-0.053) for {'C': 14, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.000 (+/-0.000) for {'C': 14, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.895 (+/-0.055) for {'C': 14, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.918 (+/-0.038) for {'C': 15, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.919 (+/-0.051) for {'C': 15, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.000 (+/-0.000) for {'C': 15, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.895 (+/-0.055) for {'C': 15, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.918 (+/-0.038) for {'C': 16, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.920 (+/-0.047) for {'C': 16, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.000 (+/-0.000) for {'C': 16, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.895 (+/-0.055) for {'C': 16, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.918 (+/-0.038) for {'C': 17, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.915 (+/-0.056) for {'C': 17, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.000 (+/-0.000) for {'C': 17, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.895 (+/-0.055) for {'C': 17, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.918 (+/-0.038) for {'C': 18, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.920 (+/-0.047) for {'C': 18, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.000 (+/-0.000) for {'C': 18, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.895 (+/-0.055) for {'C': 18, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.919 (+/-0.039) for {'C': 19, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.919 (+/-0.049) for {'C': 19, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.000 (+/-0.000) for {'C': 19, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.895 (+/-0.055) for {'C': 19, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.919 (+/-0.039) for {'C': 20, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 
'random_state': 0}\n0.918 (+/-0.056) for {'C': 20, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n0.000 (+/-0.000) for {'C': 20, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l1', 'random_state': 0}\n0.895 (+/-0.055) for {'C': 20, 'dual': True, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}\n"
],
[
"best = clf.best_estimator_",
"_____no_output_____"
],
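[
"# Caveat (editorial note): the cells below predict on the same X the grid search was fitted\n# on, so the final printed score is a training-set accuracy and will be optimistic; the\n# cross-validated clf.best_score_ above (~0.92) is the more honest estimate. A held-out\n# check could look like this sketch (the split itself is an assumption, not the author's\n# setup):\nfrom sklearn.base import clone\nfrom sklearn.model_selection import train_test_split\nX_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0, stratify=y)\nprint(clone(best).fit(X_tr, y_tr).score(X_te, y_te))",
"_____no_output_____"
],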
[
"predictions = best.predict(X)",
"_____no_output_____"
],
[
"predictions",
"_____no_output_____"
],
[
"wellClassified = 0\nfor i in range(len(predictions)):\n if predictions[i] == y[i]:\n wellClassified += 1\n else:\n print(\"prediction: \" + str(predictions[i]))\n print(\"true label: \" + str(y[i]))\n print(data[i][\"text\"])\n print()\n print()\n print()\n print()\nprint(wellClassified / len(y))",
"prediction: 0\ntrue label: 1\nEmail\nA woman accused of driving drunk as she livestreamed a crash that killed her younger sister says she made the video to help pay for her sibling’s funeral expenses.\nJacqueline Sanchez Estrada is shown in a photo posted to a GoFundMe page for her family.\nObdulia Sanchez, who has been detained since her July 21 arrest, described her reasoning in a four-page, double-sided letter penned to a reporter at KGPE-TV in Fresno.\n“I made that video because I knew I had more than 5,000 followers,” the 18-year-old Stockton resident wrote. “It was the only way my sister would get a decent burial. I would never expose my sister like that. I anticipated the public donating money because my family isn't rich.\"\nSanchez apologized for making the video, saying, “I look awful, but I accomplished my goal.”\n\n\n\n\nprediction: 0\ntrue label: 1\nDavid O'Reill, scientific director for British American Tobacco said nicotine makes a person's brain work better\nHealth experts have branded the claim 'irresponsible' accusing him of trying to sell more cigarettes\nDepartment of Health lists smoking as the cause of the most preventable deaths in the UK - reaching 80,000 in 2011\n<img src=\"http://i.dailymail.co.uk/i/pix/2013/12/15/article-2523949-173D1017000005DC-872_306x423.jpg%22 height=\"423\" width=\"306\" alt=\"Scientist David O'Reilly, who works for the tobacco industry, said nicotine makes your brain work better\" class=\"blkBorder\"/> Scientist David O'Reilly, who works for the tobacco industry, said nicotine makes your brain work better\nA scientist employed by one of the biggest cigarette manufacturers today said nicotine is good for your health.\nGroup scientific director for British American Tobacco, which makes Benson & Hedges, Dunhill and Lucky Strikes, David O'Reilly has been branded 'irresponsible' and accused of telling just part of the story.\nHe told the Sunday Times Magazine smoking helps a person's brain work more effectively, adding: 'It helps with cognition, stimulation and relaxes.'\nHis controversial claim came just a week after New York University researchers warned smokers of ecigarettes could inhale more nicotine than those who smoke regular cigarettes.\nMr O'Reilly said likened taking a puff from an ecigarette to drinking a cup of coffee, as he claimed the practise was safe.\nBut the senior molecular biologist has come under fire from health experts, who have accused him of trying to help 'sell as many cigarettes as possible'.\nProfessor John Britton, chairman of the Royal College of Physicians tobacco advisory group and professor of epidemiology at the University of Nottingham, said one hit of nicotine can have positive effects on the brain.\nBut he warned the drug is highly addictive, leaving smokers needing to get their hit to enable their brains to function normally.\n'It is true that in a single use, nicotine probably does improve concentration and hand-eye co-ordination, on a par with what you get from caffeine,' he said.\n<img src=\"http://i.dailymail.co.uk/i/pix/2013/12/15/article-2523949-0016E77900000258-953_634x502.jpg%22 height=\"502\" width=\"634\" alt=\"Health expert Professor John Britton claimed Mr O'Reilly's claim was 'irresponsible' accusing him of trying to 'sell more cigarettes'\" class=\"blkBorder\"/> Health expert Professor John Britton claimed Mr O'Reilly's claim was 'irresponsible' accusing him of trying to 'sell more cigarettes'\n'But it is also a powerfully addictive drug, and there comes a time when you need nicotine just for 
your brain to function normally.'\nA spokesman for BAT, said the company 'absolutely' stood by Mr O'Reilly's statement.\nHe said Mr O'Reilly had not advised the drug was good for you in the sense of keeping well hydrated.\nA spokeswoman for Cancer Research added: ' We don't fully understand the long-term effects of nicotine use.'\n\n\n\n\nprediction: 0\ntrue label: 1\nChina has installed weapons on all seven of the artificial islands it has built in disputed water of the South China Sea, according to a U.S. think tank’s analysis of satellite imagery. Photo: Digitalglobe/Reuters\nBEIJING—A U.S. think-tank report that China has installed antiaircraft weapons and other arms on all seven islands it has built in the South China Sea is raising the stakes in a regional dispute as U.S. President-elect Donald Trump signals he is ready to confront Beijing on territorial issues.\nThe Asia Maritime Transparency Initiative said Wednesday that satellite imagery showed China had installed the weapons in recent months, despite President Xi Jinping’s pledge not to militarize the islands in the Spratly...\n\n\n\n\nprediction: 0\ntrue label: 1\nChinese smartphone maker Vivo is reportedly planning on expanding its V series by launching a new addition – Vivo V5 Plus in India at an event held on 23 January, 2017. The smartphone will succeed the Vivo V5 and will feature numerous mid-range specifications.\nAll you need to know about the smartphone\nThe Vivo V5 Plus is said to feature a 5.5-inch full HD 2.5D curved glass display with Corning Gorilla Glass 3 protection. The device will be powered by a 1.5GHz octa-core MediaTek MT6750 processor coupled with 4GB RAM and 32GB of internal storage (expandable up to 128GB through microSD card).\nOn the camera front, the device packs a 13MP rear camera with Phase Detection Auto-Focus (PDAF) and LED flash along with a 20MP front shooter with soft light LED flash. The smartphone also includes a fingerprint sensor which is integrated into the home button.\nThe smartphone offers a 3000mAh battery and runs on Android 6.0 Marshmallow OS. Connectivity options include a dual SIM option, 4G with VoLTE, 2G/3G, Wi-Fi hotspot, 3.5mm audio jack, FM radio, Bluetooth, GPS and a micro USB port.\n\n\n\n\nprediction: 0\ntrue label: 1\nAs for the Blogger dinner...I vote for Pasta Luna, for the following reasons: a) I love Italian food b) The portions there are huge c) They have creme brulee, which is the food of the gods as far as I'm concerned d) While I loved the food at India K'raja, the service was awful and I suffered from a sad tummy afterward e) Did I mention I love Italian food? f) You know what I REALLY love? Octopussy. What a great flick.\n\n\n\n\nprediction: 0\ntrue label: 1\nI'm getting very irritated by how my blog looks right now. But I can't seem to get the focus and concentration to fix anything. That includes school stuff.\nI'm just shuffling along, doing the absolute minimal that I can possibly get away with.\nIt irritates me. Well, part of me.\nI'll be very glad when the semester is over. When it is, I think I shall kidnap Tariq's laptop, sit in Starbucks, and buy myself a non-stop flow of rhumba frappucino until my blog looks satisfactory.\nHmm, must save the money for such an expensive endeavour.\n\n\n\n\nprediction: 1\ntrue label: 0\nok, so stephan may be going to italy, which means mean mamma will be going no where this winter. well, maybe nowhere- unless she can come up with some alternative travel scenario. 
the trip to italy would be a good way for scw to see italy, because it is not a place that mean mamma would go without a group- and may be not even then- i guess that makes me romanophobic.......\n\n\n\n\nprediction: 1\ntrue label: 0\nman, chris, you are living the sweet life. (i'm going to assume that ben was talking about a missed call from chris) me - i just spent 4 days in a luxurious mountain resort hanging with one of my best friends (stef), relaxing in a hot tub, picking up rich boys, playing monopoly pinball (one of those activities is not true - figure out which one!) - it's been fun. happy new year!\n\n\n\n\nprediction: 0\ntrue label: 1\nBefore I begin let me just say that when this band first came out I thought they were pretty good. What's not to like? But after a while I started to notice something... at first I thought it was just me, but others noticed it too. All of their songs sound exactly the same!!! Can you guess the band? Give up? It's Nickelback. I found this site which accurately sums up what I am getting at. Watch and feel used. http://www.thewebshite.co.uk/nickelback.htm To summarize. Nickelback, you suck.\n\n\n\n\nprediction: 0\ntrue label: 1\nI Must Be Crazy So, here I am, less than 6 months from my first ever race (2004 Spirit of St. Louis 5K - 34:57), and now I've signed up for the Lewis & Clark Half-Marathon on Sept. 18, 2004. I'm trying to work up to the 2005 Spirit of St. Louis Marathon. Then I want to run an ultra next year, too. I think I can do it...if it's a road ultra, at least. I'm looking forward to increasing my mileage slowly. This is the start of my blog to chronicle my training to reach these goals.\n\n\n\n\nprediction: 1\ntrue label: 0\ndear susan: being away from you reminds me how much being near you just drives me away from sanity. i'd like you to never return but you are susan. much like HIV + you never completely go away. maybe one day we'll find a cure and rub it in your face and you'll dissappear until then, i'll squish your head between my thumb and index finger from a distance\n\n\n\n\nprediction: 0\ntrue label: 1\nson uf a fuckin bitch...we didn't break the fucking gator and grampa needs to get the fuck over it...if you break a radiator you will fucking know it...so we can't ride the gator cause we rode it through the pasture and broke the radiator...oh fuck off!\n\n\n\n\nprediction: 0\ntrue label: 1\nStocks on Bursa Malaysia closed in negative territory yesterday, as investors\nresorted to profit-taking after hitting historic high, while regional market\nwas weak with no overnight lead from Wall Street which was closed for a public\nholiday. The FBM KLCI fell 4.45 points or 0.28% to 1,570.04 after opening 1.43\npoints higher at 1,575.92. Market breadth was negative with losers leading\ngainers by 731 to 174 while 230 counters were unchanged. A total of 1.704\nbillion shares worth RM2.02 billion were traded against 1.907 billion shares\nvalued at RM2.201 billion on Monday.\nThe FBM KLCI opened 1.43 points higher at 1575.92 and hit the intra-day high\nof 1576.42 within the first fifteen minutes, thereafter, profit-taking\nactivities dominated the floor and pushed the key index to the intra-day low\nof 1567.91, before some late buying of selected blue-chips helped lift the key\nindex off low to close at 1570.04. 
Chart-wise, the FBM KLCI formed a bearish\nengulfing candlestick, a key reversal candlestick pattern, which indicates\nsellers were dominant throughout the day in pushing the key index lower.\nHence, the FBM KLCI is likely to further consolidate itself. Immediate support\nis provided by the 10-day moving average (MA) at 1568, which looks fragile,\nand the pivot low at 1558.\nMACD has turned downward, albeit still above its signal line, indicated a\nchange in the direction of the momentum and possible further correction ahead.\nRSI (14) fell to 68.8 from 72.8, indicated the market strength is reducing.\nStochastic is at 91.5 and has hooked downward, but is still above its slow\nstochastic line, reflected the correction of the key index. Signals from the\nindicators indicated possible further correction of the FBM KLCI in the near\nterm.\nThe FBM KLCI has closed below the 5-day MA but is just above the 10-day MA. If\nthe key index breaks below the 10-day MA, it is likely to slide further\nsouthward to the pivot support at 1558 and 1550. The underlying medium to\nlonger term uptrend, nonetheless, is still very much intact. Immediate\ndownside support zone is at 1566 to 1558 while the overhead resistance is at\n1577\\. The market is likely to see more profit-taking activities over the next\ntwo weeks as players lock in their pre Chinese New Year \"Ang-Pow\".\nOvernight, the Dow rose +50.55 points or +0.43% to close higher at 11,837.93.\nToday, the FBM KLCI is likely to trade within a range of 1557 to 1585.\nThis week's expected range: 1542 - 1594\nToday's expected range: 1557 - 1585\nResistance: 1575, 1580, 1585\nSupport: 1557, 1562, 1566\n\n\n\n\nprediction: 0\ntrue label: 1\nRegister Now, it's Free! You are currently viewing our boards as a guest which\ngives you limited access to view most discussions and access our other\nfeatures. By joining our **free** community you will have access to post\ntopics, communicate privately with other members (PM), respond to polls,\nupload content and access many other special features. Registration is fast,\nsimple and absolutely free so please, **join our community today**!\nIf you have any problems with the registration process or your account login,\nplease contact from posting on the forum! All you need is an Adsense Account.\nLogonym/Alphaglph Domains? What Do You Think?** Rather than register a\nnonsense domain unrelated to my subject when the keyword .com isn't available,\nI look for available logonyms, knowing I will have to brand them anyway..\nI just regged the logonym domain CELLPHQNE.COM for $5.49 tonight: You **may\nnot** post new threads\nYou\nmay not post replies\nYou **may not**\n\n\n\n\nprediction: 0\ntrue label: 1\nHome » Jobs & Internships\nFloriculture Internships for US Students, US\nOpen to:** US floriculture/environmental horticulture undegraduate students\nScholarship:** $1,500 (three-month), $3,000 (four-month), or $6,000 (six\nmonth) offers two intern scholarship programs. Applications are reviewed twice\na year. The deadline for submitting applications is **March 1** and **October\n1** each year.\nHistory and Objective\nVic & Margaret Ball Intern Scholarship Program - Training at a commercial\nproduction greenhouse or nursery. Vic Ball was the son of Ball Horticultural\nfounder George Jacob Ball. As the leading North American producer and\ndistributor of ornamental plants and their seeds Ball Horticulture has had a\ntremendous presence in the horticulture business. 
In 2002, Vic and Margaret\nBall made a generous donation to AFE to establish this program for students to\nreceive critical \"hands-on\" growing experiences, to help ensure successful\nfurture generations of horticulturalists.\nTraining sites are located at some of the industry's most successful businesses.\nTraining is available at floriculture production greenhouses within the United States.\nTraining must be performed in a geographic region other than the student's home or school location.\nEligibility\nFull-time, **undergraduate student**\ns who are:\nCurrently enrolled in a floriculture/environmental horticulture program at a 2- or 4-year college/university within the United States.\nU.S. citizen.**\nMaintaining a \"C\" or better GPA with satisfactory progress in a degree or certificate program.\nInternship must be completed prior to graduation.\nCosts and Benefits\nThis program allows students the opportunity to intern at commercial\nfacilities for a period of three, four or six month periods. Upon completion\nof their paid internship, the student will receive a scholarship in the amount\nof\n$1,500 (three-month), $3,000 (four-month), or $6,000 (six month).**\nTransportation costs to and from the location are the student's\nresponsibility**\n($700 advance is available for travel and relocation.)\nApplication Process\nStudents must complete the\nMarch 1** or **October 1** deadline.\nSubmit the completed **application** , the\nabove-referenced statement, and a copy of your **official college/university\ntranscript(s)** to your floricultural faculty advisor. The application and all\nsupporting documents should be signed and submitted by the student's advisor.\nHe or she will then transmit the original and eight copies, including a letter\nof endorsement, to the American Floral Endowment.\nPlease feel free to contact Debi Aker, AFE Manager at (703) 838-5211.\nAddress\nAmerican Floral Endowment**\n1601 Duke Street\nAlexandria, VA 22314\n(703) 838-5211 / Fax: (703) 838-5212 Kaczynski crash investigation: Russia\nblames Poland, Poland sees red A serious look at the emergence of mobile\ngiving: Berkman Center to Launch First Detailed Study of Mobile Giving\n\n\n\n\nprediction: 0\ntrue label: 1\nMy 7 month old's first diaper rash. Is it from the heat or the diaper?\nJanuary 20, 2011 at 12:43 pm\nIt could also be a food allergy? Sometimes the weather change can affect\nbaby's skin. I always look at the Skin Deep database (see sources) when\npicking things to put on my baby. It rates cosmetics and things based on the\ningredients.\n\n\n\n\nprediction: 0\ntrue label: 1\nMan that has undergone to prostate cancer treatment can still have sex?\n: Man that has undergone to prostate cancer treatment can still have sex? Best\nanswer:\n_Answer by cragmor_\nProbably. There is a risk of impotence when doing surgery on the prostate etc,\nbut a good chance that plumbing will still work as advertised.\nI've had no side effects and still have sex. 
I'm 62.\nJanuary 20, 2011 at 7:26 pm\nhi,\nmaybe you can find answer from this site that i have just found.\n\n\n\n\nprediction: 1\ntrue label: 0\nFrom the Wires\nFirst ImmerVision Enables® Camera Features Integrated Panomorph Lens.\nImmerVision announces that SANTEC, provider of video surveillance product to\nthe European market, has introduced the first ImmerVision\n_Enables_ camera worldwide with a Panomorph lens integrated as part of the\ncamera.\nThe SANTEC model SNC-P3601M is an IP-based network dome camera with 1.3\nmegapixel resolution, H.264 encoding, integrated SD-card and a frame rate of\n25 frames-per-second (FPS). The camera features an integrated Panomorph lens\ncompatible with ImmerVision\n_Enables_ recording and viewing devices to provide complete 360-degree area\nsurveillance from various simultaneous perspectives. Everything in the frame\nis visible and being recorded at all times; there are no blind spots.\n\"SANTEC's new camera is the first to offer a Panomorph lens as a standard\nfeature and the first example of an innovation likely to be embraced by many\nlarge camera manufacturers globally in the future.\" said Alessandro Gasparini,\nSVP, Sales & Marketing and CCO, ImmerVision.\n\"The ImmerVision _Enables_ camera is a perfect complement to PTZ or fixed\ncameras, expanding the capabilities of our video surveillance solution,\" said\nLars Diestel, CEO, SANTEC. \"The camera is also integrated into the SANTEC\nSanGuard software and the NUUO video management system, both certified\nImmerVision\n_Enables_. Being first to market with this highly innovative solution is an\naccomplishment we are proud to claim.\"\nImmerVision mission is to enable people to see everything around them when\nusing its patented 360° Panomorph lenses and its ImmerVision\n_Enables_ immersive viewing functionality. Leading the innovation in 360°\npanoramic imaging, ImmerVision licenses its Panomorph optical and software\ntechnology to global lens producers, product manufacturers and software\ndevelopers. Panomorph lenses are the only ones that can be adapted to any\ncamera, any sensor and any consumer, commercial and government market.\nwww.immervision.com\nSANTEC Video Technologies, located in Ahrensburg (near Hamburg, Germany), is a\ndistributor and manufacturer of professional CCTV technology. SANTEC delivers\nhigh-quality products for effective video surveillance concepts, integrated\nsystem solutions and CCTV units for commercial or private applications. SANTEC\noffers a versatile product range of recording systems, CCTV and IP cameras,\nlenses, weather-proof housings and accessories\nWire\nredistribution of Business Wire content is expressly prohibited without the\nprior written consent of Business Wire. Business Wire shall not be liable for\nany errors or delays in the content, or for any actions taken in reliance\nthereon.\nCloud Expo, Inc. Announces Cloud Expo 2011 New York Venue Cloud Expo, Inc.\nannounced on Thursday that Cloud Expo 2011 New York, the 8th International\nCloud Computing Conference & Expo, will take place June 6-9, 2011, at the\nJavits Center in New York City. The International Cloud Computing Conference &\nExpo series is the world's leading Cloud-... Croatia Yacht Charter Presents\nMagnum Nautica Magnum Nautica is the first and foremost Croatia yacht charter\ncompany. It has added a luxury charter yacht, Sunseeker Manhattan 60, named\n'Rej,' to its existing fleet of 10 luxury and charter yachts. 
This new\naddition to its fleet, with a capacity to accommodate 6 guests, is a great\nl... Agents To Pay For Buyer Stimulus The website, Lookflorida.com states: 'A\ngroup of Florida real estate agents have agreed to participate in the FREBS\nprogram. These agents will give you a cash stimulus at closing on your Florida\nreal estate purchase. You could be eligible for up to $150,000' The stimulus\nprogram ... Crystal's 2011 World Cruise Now Open for Booking Books are now\nopen for Crystal Cruises' 16th annual, 110-day 2011 World Cruise. Fares for\nthe \"Grand Exotic Expedition\" World Cruise aboard Crystal Serenity are now\navailable online, along with Full World Cruise and segment itinerary details,\nand attractive promotional and savings info... JetBlue Embraer ERJ-190 Flight\nFrom Chicago to New York JFK A JetBlue Embraer ERJ-190, registration N279JB\nperforming flight B6-904 from Chicago O'Hare, IL to New York JFK, NY (USA)\nwith 64 passengers and 4 crew, was on final visual approach to JFK's runway\n31R descending through 2000 feet, when the crew reported they needed to abort\nthe approa... Organic Caramel Truffles, You Sexy Thing Cloud Expo 2011 New\nYork Expands Technical Program NoSQL - The Trend for Databases in the Cloud?\nEmerging Technology Trends - Minus the Hype What Skills Do You Need for the\nCloud? The Essential Elements of a Private Cloud Cloud Expo, Inc. Announces\nCloud Expo 2011 New York Venue My Top Five Cloud Predictions for 2011: Colin\nClark Cisco Cloud Strategy, Within and Across Data Centers Copyright\n©1994-2007 SYS-CON Publications, Inc. All Rights Reserved. All marks are\ntrademarks of SYS-CON Media.\n\n\n\n\nprediction: 1\ntrue label: 0\nNow I wonder what it is like to have Steven Tyler, Simon Cowell and Paula\nAbdul as Idol judges. does anyone think Simon Cowell will ask Paula Abdul to\nbe a judge on American x factor? Simon Cowell's girl Mezghan Hussainy grins\nand bears it as they meet his ex Jackie St Clair 'American Idol' hopes changes\nboost ratings: Judge Simon Cowell -- arguably Idol's top draw -- is gone, as\nare j... http://bit.ly/fvbGN0 RT @TVChatHotline2011Simon Cowell talks Susan\nBoyle in new interview | My Blog http://bit.ly/guT92Z X Factor mogul Simon\nCowell's so miserable on yacht with fiancee Mezghan Hussainy @ellycakeface,\nlol. go see. twitter. MrKennethTong. Even Simon Cowell tweeted to him lol.\nPepsi to sponsor Simon Cowell's \"X Factor\" - Entertainment & Stars: Simon\nCowell is now a Pepsi man. The upcomin... http://bit.ly/gByO17 Photo: HOLY\nSIMON COWELL.. BET THIS TASTE REALLY GOOD http://tumblr.com/xh816pry3r\n@Jaynecollinsmac What did you get wrong... please don't say you thought Simon\nCowell was their manager!! :') Happy New Year, Jayne xx RT @NeilWhitstable:\nCouple of \"interesting local names\" in the New Year's honours list, --- Simon\nCowell's not from round your way, is he? 
Simon Cowell and his ladies sing X\nFactor karaoke on Christmas Day Simon Cowell rumored knighthood not looking\ngood http://bit.ly/eWNduf Cheryl Cole pays £3,000 for private airport suite\nused by the Queen The fat girl and the guy with glasses reminded me of my\nfriend when we went to see cheryl cole #NP Fight For This Love - Cheryl Cole\n- http://tweetmylast.fm/u/acd8f Cheryl Cole ecën përpara pas divorcit:\nKengetarja e grupit \"Girls Aloud\", Cheryl Cole duket se po v...\nhttp://laj.me/26527 #lajme #shqip Cheryl Cole feat August Rigo - Better To Lie\n( Official + Brand New Song) Cheryl Cole given £10,000 bracelet by Derek Hough\nas New Year present Joe McElderry - 'If This Is Goodbye' - Aspen, Colorado Ski\nFootage Leona Lewis - X Factor - I Will Always Love You Leona Lewis X Factor\nLive Stacey Solomon wants to write her own songs: The I'm A Celebrity...Get Me\nOut Of Here! champion first came to pr... http://bit.ly/h7wSEg #Twitition\nSign Stacey Solomon!!! http://twitition.com/dvkr3 @TeamSSolomon Stacey\nSolomon's twitter is all pretty bland but if you imagine her speaking the\nwords as you read, its hysterical. #humor #ff I liked a YouTube video --\nstacey solomon x factor http://youtu.be/_hCa59bTxdY?a LOOOL About you\nsounding like stacey solomon question, Wow all people from essex are now\ngetting compar… - haha xx http://4ms.me/h3bzl7 RT @SussexNewspaper: Stacey\nSolomon in Sussex break http://bit.ly/fMW7lF Nancy Sorrell's voice is f**kin\nawful.. I thought my voice was deep but hers is ridiculous! She sounds like a\nmale version of Stacey Solomon Stacey Solomon winner of I'm a Celebrity get me\nout of here 2010 Here's a lunchime treat for you - Matt gives us an exclusive\ninterview shortly after winning last night:\nhttp://xfactor.itv.com/2010/videos/video/watch_backstage-after-the-\nresults_item_201717.htm The X Factor\nHere's a lunchime treat for you - Matt gives us an exclusive interview shortly\nafter winning last night:\nhttp://xfactor.itv.com/2010/videos/video/watch_backstage-after-the-\nresults_item_201717.htm\n\n\n\n\nprediction: 0\ntrue label: 1\nAllen Jeremias, king L. emancipationist - Cardiac Intensive Care, 2nd\nEdition**\nS a _.nd _.rs | ISBN: 141603773X | 2010-03-23 | PDF | 736 pages | 24.60 Mb\nThe newborn edition of Cardiac Intensive Care-the exclusive aggregation sacred\nto cardiac qualifier tending medicine-chronicles the advancement prefabricated\nin the diagnosis, assessment, and communication of patients with grave cardiac\nillness.\nEditors comedienne Jeremias, MD, MSc and king L. Brown, MD inform the occasion\ndiscoveries, greater discernment of syndromes, and subject advancements that\nhit helped attain clinical cardiology a proportional and interventional field.\nYou'll intend news of the excess of noncoronary diseases in the CICU, as\nsubstantially as a rank publication of up-to-date medicine agents. The newborn\nfull-color organisation and layout and figure newborn chapters provide you the\nstylish theoretical, technical, diagnostic, and therapeutic advances in an\nreachable and visually attractive format. 
Moreover, the flooded aggregation is\nacquirable online at expertconsult.com.\n\\- Features the official perspectives of a stellar assemble of contributors-\nmany of whom are the pioneers in the fields they cover-for the prizewinning\nacquirable guidance.\n\\- Provides the base power support for the clinical touchable finished a\ncountry on the technological groundwork of cardiac qualifier tending to\nprovide you the rank picture.\n\\- Presents a medicine launching to the classes of drugs so you undergo which\nare most commonly utilised in the CICU.\n\\- Covers which noncoronary diseases ofttimes termination in entering to the\nCICU to educate you for those diagnoses that are not of a cardiac nature.\n\\- Offers the lavatory of full-text online admittance at expertconsult.com.\n\\- Features figure newborn chapters-Quality Assurance and Improvement in the\nCardiac Intensive Care Unit; Physical Examination in the CICU; Mechanical\nTreatments for Acute ST-Elevation MI; Non-ST Elevation Myocardial Infarction:\nDiagnosis, Prognosis, Risk Stratification, and Management; Glycoprotein\nIIb/IIIa Inhibitors; Vascular Access Procedures; Ventilator Management for the\nCardiac Patient; Management of Post-Operative Complications in the Cardiac\nSurgery Patient; Guidelines Relevant to Care in the Cardiac Intensive Care\nUnit-to ready the aggregation and you up to date.\n\\- Presents the aggregation in a new, full-color organisation and layout for a\nmore visually-appealing and reachable info that makes uncovering the\naggregation you requirement hurried and easy. //\nhttp://avaxhome.ws/ebooks/Cardiac_Intensive_Care_repost.html\n\n\n\n\nprediction: 0\ntrue label: 1\nAfter latest Gayus drama, House to summon task force, Gayus - Jakarta Post\nWarner Bros Sets 'Gangster Squad' Scribe For 'Lethal Weapon' Franchise Reboot\n- Deadline.com Yorkshire Post joins Mafia social network at top event -\nYorkshire Post Mafia-link cigarette smuggling gang jailed for over 30 years -\nYorkshire Post **After latest Gayus drama, House to summon task force, Gayus**\nHouse deputy speaker Pramono Anung said the House of Representatives intends\nto summon graft convict Gayus H. Tambunan and Judicial We will focus on\npursuing tax **mafia** , not verifying Gayus tirades: Taskforce\n\n\n\n\nprediction: 0\ntrue label: 1\nWHAT IS MISSING FROM THIS PICTURE…ER ARTICLE?\nhttp://www.nytimes.com/2011/01/19/realestate/commercial/19space.html?_r=1&src=twrhp\nTell me the design firm's name. What is the name of the interior designer who\nprovided expert advice for the article? Which interior design professional\norganization was cited as a source of information?\nGive? The answer is nada, zip, zero, zilch, no ID no where no how. This is not\nthe Poughkeepsie Journal folks it's the New York times and the article is not\nabout some interior decorator's inflated opinion of the latest boudoir trend\ndu jour. It is about high level commercial interior space. Now\nPROFESSIONALINTERIORDESIGNER realizes that when it comes to the numbers aspect\nof corporate real estate the commercial real estate and program manager power\nbrokers rule that realm. But come on- they do not create the space in which\nthe bean counters continue to comfortably shoehorn more and more\ndisenfranchised clock punchers- we do. 
At a minimum the design firm\nresponsible for Intel's transformation should have been acknowledged.\nWe need to ask ourselves \"selves….why do we not get any respect?\"\n\n\n\n\nprediction: 0\ntrue label: 1\nJOHIM takes over management of new global equity fund\nAdd a commentAdded 20 January 2011 by Gary Corcoran, group editor, Portfolio\nAdviser\nJO Hambro Investment Management has taken over Spencer House Capital\nManagement and will become the new manager of the SHCM Fund, a Ucits umbrella\ncompany.**\nCharles Martyn-Hemphill and Will Kenney of SHCM will join JOHIM and are\nexpected to take their positions in February.\nCurrently, SHCM is the investment manager of the SHCM Fund which is regulated\nby the Central Bank of Ireland as a Ucits proposition listed on the Irish\nStock Exchange. It has one sub fund, the SHCM Global Equity Fund, an\nunregulated fund that will be renamed as the JOHIM Global Investment Fund with\nJOHIM as its new investment manager.\nMartyn-Hemphill and Kenney will remain as managers of the fund as well as\nother segregated global equity mandates for private clients. JOHIM will\ncontinue to run its own global equity fund, managed by Jenny Fisher.\nBefore joining SHCM as a founding partner in 2006, Martyn-Hemphill spent 26\nyears at Morgan Grenfell and Deutsche Asset Management where his most recent\nposition was managing director and head of the global select equity team.\nMORE WAYS TO GET Portfolio Adviser\nSubscription\nE-magazine\nSee the magazine exactly how it was printed and download custom PDFs.\n\n\n\n\nprediction: 0\ntrue label: 1\nI love technology, but it can be a real pain sometimes. I have been trying to\nupload new photos onto my computer so I can put them up on my blog. Well, I\nhave had to do it at work. The first day I brought my computer to work, I\nforgot the USB cable. Nice. So today I grabbed the whole box from when I\nbought my camera, which should contain the USB cable. It was especially slow\nat work today, so I took out the box to go over the manual and add my pixs. I\nopen the box, no USB! I know it was there when I bought it and I am now\nremembering that it is attached to my adapter that I use to charge the\nbattery! Really? So, I think, \"Okay, I'll just put my memory card into my\nprinter at work and get them that way.\" I put the memory card in the printer\nand get an error message saying \"memory card error\". Now this is just getting\nridiculous!\nI think I will wait until this week is over to post pixs and get \"caught up\"\nwith my techno stuff. 
I will be done with work after this week so I'm thinkin'\nyeah.\n\n\n\n\nprediction: 0\ntrue label: 1\nThe RAF has marked the end of service for its oldest aircraft with a final\nflypast in Lincolnshire .\nThe Hawker Siddeley 125 Dominie aircraft is to retire from service at the end\nof January.\nOperated by 55 (Reserve) Squadron at RAF Cranwell in Lincolnshire, the Dominie\nprovides training for all rear crew in the RAF.\nIt has been in service for more than 45 years, and was originally procured in\nthe 1960s to train Navigators, particularly for the V Force.\nBut the cancellation of the Nimrod MRA4 and a reduction in the number of\nTornado GR4s means the RAF will no longer train Weapons System Officers,\nformerly navigators, after the current course graduates.\nThe RAF said 22 aircraft were built initially, with 55 Squadron operating the\nlast remaining seven.\nOn Thursday six Dominies flew in formation out of RAF Cranwell then carried\nout final flypasts over the base to mark the end of its service.\n\n\n\n\nprediction: 0\ntrue label: 1\nKulikova leads 3*-2 in the 1st set when I lay her at 1.16. Can't be having\nthat price so early. A lay!! I know! Hope Scheepers can get a break back.\nOooh deuce from 40-15! BP and I'm out thankyou very much Chanelle. FFS not\ntaken at 1.23!!! now deuce again! GP ... Deuce getting out at 1.20 ... aaargh\nffs market take my bets!!! Still on original bet! ... GP ... deuce and finally\nmatched at 1.20!!\n£1.98 on both.\nGrrrrr now BP ... Deuce ... BP #3 Break!! FFS could have got mech better still\nit's a 20% return for a couple of minutes work.\nScheepers leads 3-5* now! Kulikova out to 1.85! should have held my bet!\nHindsight eh?\n\n\n\n\nprediction: 0\ntrue label: 1\nMiscellaneous » Overcoming the Disadvantages of Dedicated Hosting | Hobgit Web\n… Loading ...\nWednesday, January 19, 2011, 17:36\nhosting offerings, you might be wondering what you will be missing out on if\nyou stick with shared web hosting.\nSantiago has written 1656 stories on this site.\nA Blackberry is a wireless handheld device which supports email, text\nmessaging, web browsing and other wireless information services Tell us a\nlittle about your background. What industry experience you have? And what did\nyou do before your company Pitney Bowes International Holdings, Inc. (\"PBIH\")\ntoday announced that it has extended the early tender date for its previously\nannounced tender offers (the \"Tender Offers\") for its Variable Term Voting\nPreferred Stock, Citirx Synergy 2010 announces 1st Citrix Xenserver Private\nCloud Appliance, Deliver the Cloud Not complexity. worldcloud.com introduces\nexhibit its cloud technologies for FileMaker Hosting. DedicatedNOW,\neSecureData, NETRACKservers & Online Tech wins Top Dedicated Server Hosting\nAwards ServerBeach intrduces fully configure MongoDB dedicated servers.\n\n\n\n\nprediction: 0\ntrue label: 1\nmeghannmadruga: tea lattes were on half price this week so I caved and bought\none\nbut Ive been good about not buying unnecessary beverages this year\nmake sure you credit me so I can ride the gravy train Aaron Philips sings on\n\"Unnecessary Beverage\" like acai berry is the name of a rapper\nI'm Aaron. **\nI lived in Chicago, then Anchorage, then DC, then Oahu. Now I live on the\nBig Island of Hawaii. I'm a writer, actor, marine naturalist, and all around\ncreative type person.\nThis is my place for cool stuff I make, write, photograph, film, and find.\nI also do Marine Biology Education. 
I'm a big nerd who likes whales.\nI enjoy satire, talking animals, and cephalopods.\nI work as a Naturalist on a whale watch boat here in Kona, and on the weekends\nI hike, scuba dive, explore, and seek out the island's marine inhabitants. All\nphotos are mine unless a click through source is provided.\n\n\n\n\nprediction: 0\ntrue label: 1\nThis past Sunday at the Club was an exciting day! We had elder commissioning,\ncommunion, corporate worship and parent commissioning. This was our first\nparent commissioning at West Club. You may be familiar with this, but might\nhave called it something like baby dedication in the past. I wanted to share\nthe pics of our Club Kids and the scripture passages that their parents have\nasked you to pray for them. As you can see, we had one handsome man surrounded\nby several lovely ladies.\nYou could play a major role in these kids learning about Jesus! You could be\ntheir small group leader or assistant small group leader on Sunday mornings.\nWe are welcoming new volunteers this Saturday at an event called\nCalliah Grace O'Brien\nEphesians 3:14-21\nP\nPayton Dawn Parker\nPhilippians 3:7-9\nLocation:** 2031 West Club Blvd\nService Times:** : Frontline - Training for new volunteers\nSun 1/30/2011** : Baptism & New Member Commissioning\n\n\n\n\nprediction: 0\ntrue label: 1\nThe most extensive deposits of eolianite in the world are located on the\nsouthern and western coasts of) was a watch-maker inventor musician politician\nfugitive spy publisher arms-dealer and revolutionary (both French and\nAmerican) He was best known however for his theatrical works especially the\nthree Figaro playsAlexandra Petrova was shot and killed in 2000 by an\nunidentified gunman in her apartment at the Volga River city ofTo enable the\nMumbai Suburban Railway to meet the demands of the ever-growing passenger\ntraffic the federal\n\n\n\n\nprediction: 0\ntrue label: 1\nHad a look at the online e-conference that was held over the last couple of\ndays. A novice like me found it hard to negotiate. Seemed to be lots of\nnetworking ( read wasting time saying hello etc ) but I did read one of the\nkeynote speaker's words. She said that teachers who didn't embrace the new\ntechnology were like doctors who didn't keep up with the latest medical\nadvances. A bit extreme or accurate??\n\n\n\n\nprediction: 0\ntrue label: 1\nhere;s your change to own a really clean Suzuki Grand Vitara V6 automatic. The\nvehicle was purchased from the original owner 2 years ago, and was always\nmaintaned properly.\nit has only 77000 miles, all original. Every service has been done by NAPA\ncertified shop.\nI has been recently serviced, including a brand new air conditioning system.\nplease email me or call for details.\nIt belongs to my sister who just purchased a Mini cooper so she has no need\nfor this great vehicle. The interior is perfect, from a non smoker.\nClean and clear Florida title.\nDon't miss the chance to own a really good vehicle that will be trouble free\nfor many years to come.\nthanks for your interest\nEmilio\[email protected]\n\n\n\n\nprediction: 0\ntrue label: 1\nThis is a Melanie's Mall set. It includes a mall with a moving elevator, coffee shop, game center, shoe and perfume store. It also has a catwalk with lights that turn on and a moving walker. There is also a make-up station and a mirror to go with it. It comes with 2 dolls and accesories. I'm asking 15 dollars for it email [email protected]\n\n\n\n\nprediction: 0\ntrue label: 1\n1999 Chevy: Corvette !!! 
L@@@K\nThis Vette is in excellent shape and is ready to be driven and enjoyed.\nALL POWER EQUIPMENT INCLUDING THE POWER WINDOWS AND LOCKS, POWER MIRRORS AND\nTHE POWER SLIDING WORK CORRECTLY.\nThe seats are in good condition. The rest of the interior looks great and the\ncar smells fresh and clean. The head liner has no stains or any rips or tares.\nEmail me for all questions - [email protected]\nUnder the direction of George Parks the 300-member marching band will be\ncomposed of select high school band students from across the country Parks is\nthe director of theAxelrod's directorial efforts ( Lord Love a Duck 1966 The\nSecret Life of an American Wife 1968) though equally superb have unfortunately\nbeen overlooked After a decade hiatus he returned to film work in 1979\nproviding the screenplay for the remake of The Lady Vanishes Subsequent\ncontributions include the scripts for Frankenheimer's The Holcroft Covenant\n(1985) and The Fourth Protocol (1987)This link is sometimes surgically severed\nto control severe seizures in epilepsy patients This procedure was first\nperformed byGreg Richardson 1958 1991 Bantamweight Biography Boxing Boxrec com\nFebruary 25 February 7 Joichiro Tatsuyoshi List of WBC world champions /\n\n\n\n\nprediction: 1\ntrue label: 0\nwhy can't i post today? and why is ross reading the sun? or at least, looking at page 3. you enlightened renaissance man, you. fonz and i are going to be golf-ninjas very soon. why do i keep trying to spell ninjas with a g? maybe the ginger ones are ningas. someone get me the new soulwax album that isn't out yet.\n\n\n\n\nprediction: 1\ntrue label: 0\nok, so i got to cook tonight with a chef in training and a self-trained chef- and i enjoyed every minute.....they made a total mess of the kitchen- and scented the air permanently with the smell of fried grease- but the meal was great and the company was good. i will now stack my food rather than arrange it side-by-side..........\n\n\n\n\nprediction: 1\ntrue label: 0\nHello all! Greetings! I'm back home! its good to be back. but its also saddening to be back since ill miss my host family b/c theyre all awsome. Japan rocked my socks off! friends- we'll have to go crusing around alrighty? Just give me a call (402)421-1061 ask for me! all you yfu buddies you guys are awsome. we had some crazy times with the marines on the plane!\n\n\n\n\nprediction: 0\ntrue label: 1\nI miss Kelly. A lot. She's my homie. And in the Group. Edie, I'm sorry Thomas din't call you. Rosalie, I'm sorry you had a bad other day. I know both of ya, you'll make it out fine. Anyway, around 7:15 Poncho came over to grab some Airsoft beebs, he wanted some more. That dude I bought my FAULTY gun from will give me a refund, but only through SquareTrade which costs $20.00 and my gun was $27.99, so FTW? (Fuck the WHAT?) It's a Counter-Strike thing. Lizz, Katie's friend lives here in FoCo, and she don't mountain bike. Another FTW. Melanie owes me a dollar. I miss Kelly. Life isn't bad. -beej\n\n\n\n\nprediction: 0\ntrue label: 1\nI might try and download that album when I get the chance. I've just been over to the canteen for a cocked breakfast. Cheeky or what? Dave, did you remember to leave you keys with the man on the front desk? And if not, what should I do? 
I'm bringing my golf bag and clubs up with me, and I'm gonna ninja your ass on the golf course.\n\n\n\n\nprediction: 0\ntrue label: 1\nThis is my first proper post to my new Movable Type blog.\nIt feels weird.\nI feel weird.\nI haven't blogged in a really long time, and I don't feel like really blogging the way I used to. I must practice \"discretion.\" That's the word.\nWhy the hell I have to practice \"discretion\" for, anyhow? Well, because apparently, in discretions get people into trouble. Have yet to encounter indiscreet troubles, but I have been assured that they exist.\nBeing prudent, if not discreet, I bumble along, trying to figure out how to practice \"discreet blogging.\"\nAt least it's an exciting new challenge....\n\n\n\n\nprediction: 0\ntrue label: 1\nWhy? I'll just do what I do every year - concentrate on my team for three weeks before forgetting about it, only to be reminded that I was playing when the final results come in and I'm somewhere near the bottom. I'm pretty sure I've never even made a transfer in fantasy football, because I never pay attention long enough to need to.\n\n\n\n\nprediction: 1\ntrue label: 0\nsome pick-up lines i heard on family guy: you must be a parking ticket because you've got \"fine\" written all over you. (to two girls talking to each other) pardon me but i don't want to come between you two. or do i? if you don't understand that last one, just think about it and it'll come to you... haha\n\n\n\n\nprediction: 0\ntrue label: 1\nWhat a nice weekend! I didnât do anything special but it was nice nonetheless. Today Andrew and I went and saw Spiderman 2 which was really good. I think it was better than the first one despite Kirstenâs attempt at acting again (I am so not a Kristen Dunst fan). Anyway, I highly recommend seeing it in the theaters before it leaves because of some great action sequences. Afterwards, we trotted over to Copley and did a little shopping. Andrew got a really great tie for super cheap; I got some naughties at Victoriaâs Secret and then my cool new laptop bag at Sony that I mentioned the other day. I justified the cost of it because I did a really cool thing. You see I bought my laptop in Mass last weekend and we got our TV in NH. I decided that I was driving to NH to return and buy back my laptop in order to save $100 in taxes. When I called to make sure that I could do that, the girl just went ahead and did it over the phone! I got back $100! Score! So I decided to buy that bag I wanted. Here it is: I went with green because #1 Iâm really into green right now and #2 it was the only cute one that my laptop would fit into to. So I am really happy with it and itâs really hip and light with my computer in it. After our little shopping spree, we headed over to PF Changâs for dinner for some Changâs Spicy Chicken and Moo Shoo Chicken. It was very yummy. :o) Good evening!\n\n\n\n\nprediction: 1\ntrue label: 0\nWhy can't it rain? All I'm asking is for a little rain just to cool it down a bit! Only a very quick post tonight as it's too hot to be sat in here. The good news: my car will be ready on Friday, or Saturday at the latest, I can't wait. Of course by then you just know it's going to be raining! Well that was all I have to say for myself right now, I'm off to find something cold to drink! 
:)\n\n\n\n\nprediction: 1\ntrue label: 0\nJust a quick post since I've not written for a few days; The past week has been really tiring, work has been mad (I'll have to work again this Saturday), and I've not really spent as much time as I should have on my own projects. That said I'm not even going to think about them tonight, my plans are nothing more than getting a nice cool drink (cherry coke) and watching a movie. Whatever you people are up to tonight, make sure your having fun doing it :) Good night!\n\n\n\n\nprediction: 1\ntrue label: 0\nHaiz... now doin our MWA integrated project in the library with jean and gack... haiz... jus now alot of 'distraction'... till it jus gone... haha... dun wanna say it... anyway, did a few animations... then jean attitude... wanna go home... go wad home la.. knn... lol keep shutting down his stupid lappy... cfm got those xxx stuff dun let us see de loh.... wa lau... i remember tat time he dl clips till tio pop-ups... muahahah... then tio until damn hiong... ahaha.... even got popup blocker oso cannot make it... haha now his comp got AD AWARE~! dl-ed from a porn site.. lol... dunno still have anot... lol... anyway, he is beside me now... hahahahaha...\n\n\n\n\nprediction: 1\ntrue label: 0\nWoAh... SuPrIsIng Sia ToDaY... SlEpT aT 3:30 am \"JuSt nOw\", WoKe Up At 10+... haha mIrAcLe SiA... i ExPectInG tO sLp TiLl LaTe AfTeRnOoN sIa... AnYwAy, gOtTa Go pIa My PeRSoNaL wEbBiE le... ArGh... So MuCh ThInGs On My miND NoW... BudDeN duNnO wAd to Do 1St~ LiStEnIn tO rEn ShEn hAi HaI by Wu YuE tiAn... HaaH i thInK i gO pIa mY PeRsOnAl WeBbIe lE... be BaCk To BlOg LatER\n\n\n\n\nprediction: 1\ntrue label: 0\n(another mm reference in the title for all you newfound glory fans out there) so sarah, michael, rachel and i all went to see spider-man 2 yesterday, i don't think there was a single scene in it where the characters weren't crying... but yeah anyway the movie was good, the company was better, afterwards we went to starbucks, then the park, all good, sarah and i had a little talk and we're exclusive now, once again all good. i'm as happy as i am tired\n\n\n\n\nprediction: 0\ntrue label: 1\nUsing the stats from several laboratories from Europe, Americas and Australia,\nwe need to define relationships between heart diseases and other factors such\nas smoking, atmospheric pressure, presence of other inherited diseases.\nData relationships have to be defined on purely mathematical basis, using\ndifferent regressional and other models.\n\n\n\n\nprediction: 0\ntrue label: 1\nImproved performance from a class-based inheritance system separate from the\nprototype-based inheritance system(the registry of one Tigres player had\nproblems) and the game and two others had to be re-matched on the league (the\nother ClFlash Lite is the Flash technology specifically developed for mobile\nphones and consumer electronics devices Supports Flash 4 ActionScriptsinger\nThis incident was caught on video camera and is now a popular internet\ndownload Danzig has not played in the area since this beating\n\n\n\n\nprediction: 0\ntrue label: 1\nSearching for established Marketers looking to aggressively move with VERY\nunique, science based company. Our research is very intense, and very far\nahead of ANY other company....and we have an impact product that is one-of-a-\nkind. $79 million in sales last year, doubling our sales each year. 
NOW is the\ntime to at least take a look.\nWe are not a \"juice company\", and we are not a \"vitamin company\"...this is DNA\nlevel renewal you will not find anywhere else, guaranteed, and patented.\nFinancially solid, best comp in industry, on track to $1 billion by 2012. New\nYork is virtually untapped.\nCall Mike @ 209.223.2200\n\n\n\n\nprediction: 0\ntrue label: 1\nI would like to give away one of these scissor fobs. You can guess what\npattern I am working on by the picture below and leave your guess in the\ncomments. I will pull a name from all of the correct guesses and announce a\nwinner on Sunday, Sept. 9, at 12:00pm. The winner can choose which color fob\nthey would like by clicking here. You can click on the picture to make it\nlarger.\nmystery-dress.jpg\nHint: This is an heirloom sewing pattern and can be found here or here.\nI guess all the HS&S ladies will know lol.\nLet the games begin!!\n\n\n\n\nprediction: 0\ntrue label: 1\nthree point trailer hitch picturesI want to fabricate a trailer hitch to work off my three\npoint hitch. I have a fifth wheel that I want to move about and a pop up. I was wondering if\nthere are similar draw bar photos out there to share? Here are pictures that I took several\nyears ago at an RV dealer. I have an old mopar frame that I will cannibalize to get started\non. Any photos out there? Inside the channel is a pipe so that the pin does not get hooked up\non a raw edge.\n\n\n\n\nprediction: 1\ntrue label: 0\n2010 Releases / Review: Phish - Live at the Legendary Alpine Valley Music\nTheatre\nOn August 14th and 15th, 2010 Phish played at Alpine Valley Music Theater. The\nAugust 14th show is the focus of the\n_Alpine Valley DVD_ , which was released on December 14, 2010.\nThe box set features the complete August 14th, 2010 performance on 2-DVDs, as\nwell as 2-CDs, and bonus video featuring highlights from the following night,\nincluding \"AC/DC Bag,\" \"Divided Sky\" and \"Stealing Time From The Faulty Plan.\"\nThe three-song combination that opened set two of the 15th rounds out the\nbonus material with \"Ghost,\" \"Theme From The Bottom,\"and \"Big Black Furry\nCreature From Mars.\"\nBut enough with the marketing bullshit. Quite simply stated this set rocks.\nThe audio of the recording is superb. It was recorded using 57 channels of\ndigital multitrack and mixed and mastered in 5.1 Dolby surround and PCM\nstereo. In short, it sounds great. And if you turn it up very loud, it sounds\neven better. In fact, it probably sounds better than if you were at the show.\nThe video was a 7-camera shoot, which helps keep the visuals as interesting as\nthe audio because, let's face it, Phish doesn't use pyrotechnics, and Trey\nAnastasio is not known for jumping around he stage like a lunatic. The\nmultiple cameras provide terrific perspective of Phish doing what they do\nbest, playing their asses off and having a great time doing it.\nThe band was on fire during this performance, which is what really makes this\nset a worthwhile viewing and listening experience. It wasn't a flawless\nexperience by any means, but these guys play, jam, improvise, and play the\nhell out of their music. 
It's a real treat to see and hear Phish play live.\n_Live at the Legendary Alpine Valley Music Theatre_\nis one of the best CD/DVD sets I've watched in quite a while.\nTube\nThe Oh Kee Pa Ceremony\nSuzy Greenberg\nFunky Bitch\nReba\nFuck Your Face\nAlaska\nBack on the Train\nWhen the Circus Comes\nLawn Boy\nSparkle\nGumbo\nRun Like an Antelope\nDown with Disease\nWhat's the Use\nScent of a Mule\nMike's Song\nDirt\nSneakin' Sally Thru the Alley\nWeekapaug Groove\nBug\nQuinn the Eskimo\nThe Oh Kee Pa Ceremony\nSuzy Greenberg\nFunky Bitch\nBack on the Train\nTaste\nWhen the Circus Comes\nLawn Boy\nSparkle\nGumbo\nRun Like an Antelope\nAC/DC Bag\nOn Your Way Down\nDivided Sky\nStealing Time from the Faulty Plan\nDavid Bowie\nDown with Disease\nWhat's the Use\nScent of a Mule\nMike's Song\nDirt\nSneakin' Sally Thru the Alley\nWeekapaug Groove\nBug\nQuinn the Eskimo\n\n\n\n\nprediction: 0\ntrue label: 1\n⇐ Know-Oracle : Featured Blog Reader\nRecently I was mulling over what is meaning of different terms we use in\ndifferent context, are they same or have some different understanding. Want to\nknow what you think on, do share your view\nEither put them in Comment or send me in email, i will re-publish them with\narticle\n⇐ Know-Oracle : Featured Blog Reader Like\nBe the first to like this post.\nAutoInvoice Program Overview in Oracle Applications - Receivables Oracle EBS-\nRegularly Asked Interview Question - Good for Beginners Oracle - Technical FAQ\n- Part 4 (via Shivmohan Purohit's Oracle Applications Blog) 3 days ago\nAnyone, who can give training on \"Noetix\" in India ?\nHi Bob, I am sure these are FREE and no spam or cross selling !! 4 days ago\nDebunking The Startup Myth: Finding The Right Team « Marketing Startups:\nGet your Email Subscribed for Latest Articles in your INBOX\n\n\n\n\nprediction: 0\ntrue label: 1\nChina rights activists hope Obama won't disappoint (AFP)\nAFP - The wife of missing Chinese human rights lawyer Gao Zhisheng fought back\ntears Wednesday as she spoke of her children's pain living without their\nfather, who disappeared in April 2010\\. BUSINESS\nThe year of the rabbit in the Chinese lunar calendar begins next month, and\nfor investors in Hong Kong,...\nSouth Korea Uses Silicon Valley's Firetide to Become the Most Advanced 'Smart\nCountry' in the World BUSINESS\nSEOUL, South Korea & LOS GATOS, Calif.-(BUSINESS WIRE)-South Korea Uses\nSilicon Valley's Firetide to Become the Most Advanced 'Smart...\nBUSINESS\nAn Indonesian official at the center of a corruption scandal that has shocked\nthe country with tales of dirty...\nMarket Vectors® to Split Shares of Indonesia Index ETF Three for One BUSINESS\nNEW YORK-(BUSINESS WIRE)-Van Eck Global announced today that the Board of\nTrustees of the Market Vectors ETF Trust has...\n\n\n\n\nprediction: 0\ntrue label: 1\nNational security expert Dr. Sebastian Gorka said mainstream media criticism of President Trump's executive order barring immigration from several Middle Eastern nations is a product of their personal ideologies.\n\"They're just the chattering classes,\" Gorka said, \"It's all about ideology and not national security.\"\nImmigration Ban Protest Organizer Spars With Tucker: 'Democracy is Messy'\nTrump Fires Acting AG Yates for Defying Refugee Ban\nGorka said many in the media have refused to acknowledge that, despite calling the order a \"Muslim ban\", key countries like Saudi Arabia and Indonesia are not listed.\n\n\n\n\n0.9309090909090909\n"
],
[
"bestParams = {'C': 6, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0}",
"_____no_output_____"
],
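[
"# A hedged sketch of how bestParams above could have been obtained; the exact\n# search grid and the use of GridSearchCV are assumptions, not taken from this notebook.\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import LinearSVC\n\nparam_grid = {'C': [1, 2, 4, 6, 8], 'dual': [False], 'penalty': ['l2'], 'random_state': [0]}\nsearch = GridSearchCV(LinearSVC(), param_grid, cv=5)\n# search.fit(X, y); bestParams = search.best_params_",
"_____no_output_____"
],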
[
"clf = LinearSVC(**bestParams)",
"_____no_output_____"
],
[
"clf.fit(X, y)",
"_____no_output_____"
],
[
"best = clf",
"_____no_output_____"
],
[
"import pickle",
"_____no_output_____"
],
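[
"# In case serialize/deserialize are not defined earlier in the notebook, here is a\n# minimal pickle-based sketch of what they are assumed to do:\ndef serialize(obj, path):\n    with open(path, \"wb\") as f:\n        pickle.dump(obj, f)\n\ndef deserialize(path):\n    with open(path, \"rb\") as f:\n        return pickle.load(f)",
"_____no_output_____"
],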
[
"serialize(clf, \"best.pickle\")",
"_____no_output_____"
],
[
"best = deserialize(\"best.pickle\")",
"_____no_output_____"
],
[
"s",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a17c22cde4097cb78788ce9ee054f22cd44331a
| 14,751 |
ipynb
|
Jupyter Notebook
|
content/notebook/.ipynb_checkpoints/brython-checkpoint.ipynb
|
coursemdetw/reveal2
|
3529ce4b1b59f6f9c668569640e05814c0e6a232
|
[
"MIT"
] | null | null | null |
content/notebook/.ipynb_checkpoints/brython-checkpoint.ipynb
|
coursemdetw/reveal2
|
3529ce4b1b59f6f9c668569640e05814c0e6a232
|
[
"MIT"
] | null | null | null |
content/notebook/.ipynb_checkpoints/brython-checkpoint.ipynb
|
coursemdetw/reveal2
|
3529ce4b1b59f6f9c668569640e05814c0e6a232
|
[
"MIT"
] | null | null | null | 29.502 | 143 | 0.501525 |
[
[
[
"https://github.com/kikocorreoso/brythonmagic\n\nhttp://nbviewer.ipython.org/github/kikocorreoso/brythonmagic/blob/master/notebooks/Brython%20usage%20in%20the%20IPython%20notebook.ipynb",
"_____no_output_____"
]
],
[
[
"import IPython\nIPython.version_info",
"_____no_output_____"
],
[
"%install_ext https://raw.github.com/kikocorreoso/brythonmagic/master/brythonmagic.py",
"_____no_output_____"
],
[
"%load_ext brythonmagic",
"_____no_output_____"
],
[
"%%HTML\n<script type=\"text/javascript\" src=\"https://brython.info/src/brython_dist.js\"></script>",
"_____no_output_____"
],
[
"%%brython -c my_container\n# 假如要列出所產生的 html 則使用 -p\nfrom browser import doc, html\n\n# This will be printed in the js console of your browser\nprint('Hello world!')\n\n# This will be printed in the container div on the output below\ndoc[\"my_container\"] <= html.P(\"文字位於 div 標註內\", \n style = {\"backgroundColor\": \"cyan\"})",
"_____no_output_____"
],
[
"%%brython\nfrom browser import alert\n\nalert('Hello world!, Welcome to the brythonmagic!')",
"_____no_output_____"
],
[
"%%brython -c simple_example\nfrom browser import doc, html\n\nfor i in range(10):\n doc[\"simple_example\"] <= html.P(i)",
"_____no_output_____"
],
[
"%%brython -c table\nfrom browser import doc, html\n\ntable = html.TABLE()\n\nfor i in range(10):\n color = ['cyan','#dddddd'] * 5\n table <= html.TR(\n html.TD(str(i+1) + ' x 2 =', style = {'backgroundColor':color[i]}) + \n html.TD((i+1)*2, style = {'backgroundColor':color[i]}))\ndoc['table'] <= table",
"_____no_output_____"
],
[
"%%brython -c canvas_example\nfrom browser.timer import request_animation_frame as raf\nfrom browser.timer import cancel_animation_frame as caf\nfrom browser import doc, html\nfrom time import time\nimport math\n\n# First we create a table to insert the elements\ntable = html.TABLE(cellpadding = 10)\nbtn_anim = html.BUTTON('Animate', Id=\"btn-anim\", type=\"button\")\nbtn_stop = html.BUTTON('Stop', Id=\"btn-stop\", type=\"button\")\ncnvs = html.CANVAS(Id=\"raf-canvas\", width=256, height=256)\n\ntable <= html.TR(html.TD(btn_anim + btn_stop) +\n html.TD(cnvs))\n\ndoc['canvas_example'] <= table\n# Now we access the canvas context\nctx = doc['raf-canvas'].getContext( '2d' ) \n\n# And we create several functions in charge to animate and stop the draw animation\ntoggle = True\n\ndef draw():\n t = time() * 3\n x = math.sin(t) * 96 + 128\n y = math.cos(t * 0.9) * 96 + 128\n global toggle\n if toggle:\n toggle = False\n else:\n toggle = True\n ctx.fillStyle = 'rgb(200,200,20)' if toggle else 'rgb(20,20,200)'\n ctx.beginPath()\n ctx.arc( x, y, 6, 0, math.pi * 2, True)\n ctx.closePath()\n ctx.fill()\n\ndef animate(i):\n global id\n id = raf(animate)\n draw()\n\ndef stop(i):\n global id\n print(id)\n caf(id)\n\ndoc[\"btn-anim\"].bind(\"click\", animate)\ndoc[\"btn-stop\"].bind(\"click\", stop)",
"_____no_output_____"
],
[
"%%HTML\n<script type=\"text/javascript\" src=\"https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.6/d3.js\"></script>",
"_____no_output_____"
],
[
"%%brython -c simple_d3\nfrom browser import window, document, html\nfrom javascript import JSObject\n\nd3 = window.d3\n\ncontainer = JSObject(d3.select(\"#simple_d3\"))\nsvg = container.append(\"svg\").attr(\"width\", 100).attr(\"height\", 100)\ncircle1 = svg.append(\"circle\").style(\"stroke\", \"gray\").style(\"fill\", \"gray\").attr(\"r\", 40)\ncircle1.attr(\"cx\", 50).attr(\"cy\", 50).attr(\"id\", \"mycircle\")\n\ncircle2 = svg.append(\"circle\").style(\"stroke\", \"gray\").style(\"fill\", \"white\").attr(\"r\", 20)\ncircle2.attr(\"cx\", 50).attr(\"cy\", 50)\n\ndef over(ev):\n document[\"mycircle\"].style.fill = \"blue\"\n\ndef out(ev):\n document[\"mycircle\"].style.fill = \"gray\"\n\ndocument[\"mycircle\"].bind(\"mouseover\", over)\ndocument[\"mycircle\"].bind(\"mouseout\", out)",
"_____no_output_____"
],
[
"%%brython -c manipulating\nfrom browser import document, html\n\ndef hide(ev):\n divs = document.get(selector = 'div.input')\n for div in divs:\n div.style.display = \"none\"\n\ndef show(ev):\n divs = document.get(selector = 'div.input')\n for div in divs:\n div.style.display = \"inherit\"\n\ndocument[\"manipulating\"] <= html.BUTTON('Hide code cells', Id=\"btn-hide\")\ndocument[\"btn-hide\"].bind(\"click\", hide)\n\ndocument[\"manipulating\"] <= html.BUTTON('Show code cells', Id=\"btn-show\")\ndocument[\"btn-show\"].bind(\"click\", show)",
"_____no_output_____"
],
[
"from random import randint\n\nn = 100\nx = [randint(0,800) for i in range(n)]\ny = [randint(0,600) for i in range(n)]\nr = [randint(25,50) for i in range(n)]\nred = [randint(0,255) for i in range(n)]\ngreen = [randint(0,255) for i in range(n)]\nblue = [randint(0,255) for i in range(n)]",
"_____no_output_____"
],
[
"%%brython -c other_d3 -i x y r red green blue\nfrom browser import window, document, html\n\nd3 = window.d3\n\nWIDTH = 800\nHEIGHT = 600\n\ncontainer = d3.select(\"#other_d3\")\nsvg = container.append(\"svg\").attr(\"width\", WIDTH).attr(\"height\", HEIGHT)\n\nclass AddShapes:\n def __init__(self, x, y, r, red, green, blue, shape = \"circle\", interactive = True):\n self.shape = shape\n self.interactive = interactive\n self._color = \"gray\"\n self.add(x, y, r, red, green, blue)\n\n def over(self, ev):\n self._color = ev.target.style.fill\n document[ev.target.id].style.fill = \"white\"\n \n def out(self, ev):\n document[ev.target.id].style.fill = self._color\n \n def add(self, x, y, r, red, green, blue):\n for i in range(len(x)):\n self.idx = self.shape + '_' + str(i) \n self._color = \"rgb(%s,%s,%s)\" % (red[i], green[i], blue[i])\n shaped = svg.append(self.shape).style(\"stroke\", \"gray\").style(\"fill\", self._color).attr(\"r\", r[i])\n shaped.attr(\"cx\", x[i]).attr(\"cy\", y[i]).attr(\"id\", self.idx)\n if self.interactive:\n document[self.idx].bind(\"mouseover\", self.over)\n document[self.idx].bind(\"mouseout\", self.out)\n\nplot = AddShapes(x, y, r, red, green, blue, interactive = True)",
"_____no_output_____"
],
[
"%%HTML\n<script type=\"text/javascript\" src=\"https://cdnjs.cloudflare.com/ajax/libs/openlayers/2.13.1/OpenLayers.js\"></script>",
"_____no_output_____"
],
[
"%%brython -c ol_map\n# we need to get map png in SSL\n# take a look at http://gis.stackexchange.com/questions/83953/openlayer-maps-issue-with-ssl\nfrom browser import document, window\nfrom javascript import JSConstructor, JSObject\n\n## Div layout\ndocument['ol_map'].style.width = \"800px\"\ndocument['ol_map'].style.height = \"400px\"\ndocument['ol_map'].style.border = \"1px solid black\"\n\nOpenLayers = window.OpenLayers\n\n## Map\n_map = JSConstructor(OpenLayers.Map)('ol_map')\n\n## Addition of a OpenStreetMap layer\n_layer = JSConstructor(OpenLayers.Layer.OSM)( 'Simple OSM map')\n_map.addLayer(_layer)\n\n## Map centered on Lon, Lat = (-3.671416, 40.435897) and a zoom = 14\n## with a projection = \"EPSG:4326\" (Lat-Lon WGS84)\n_proj = JSConstructor(OpenLayers.Projection)(\"EPSG:4326\")\n_center = JSConstructor(OpenLayers.LonLat)(-3.671416, 40.435897)\n_center.transform(_proj, _map.getProjectionObject())\n_map.setCenter(_center, 10)\n\n## Addition of some points around the defined location\nlons = [-3.670, -3.671, -3.672, -3.672, -3.672,\n -3.671, -3.670, -3.670]\nlats = [40.435, 40.435, 40.435, 40.436, 40.437,\n 40.437, 40.437, 40.436]\n\nsite_points = []\nsite_style = {}\n\npoints_layer = JSConstructor(OpenLayers.Layer.Vector)(\"Point Layer\")\n_map.addLayer(points_layer)\n\nfor lon, lat in zip(lons, lats):\n point = JSConstructor(OpenLayers.Geometry.Point)(lon, lat)\n point.transform(_proj, _map.getProjectionObject())\n _feat = JSConstructor(OpenLayers.Feature.Vector)(point)\n points_layer.addFeatures(_feat)",
"_____no_output_____"
],
[
"%%brython -s styling\nfrom browser import doc, html\n\n# Changing the background color\nbody = doc[html.BODY][0]\nbody.style = {\"backgroundColor\": \"#99EEFF\"}\n \n# Changing the color of the imput prompt\ninps = body.get(selector = \".input_prompt\")\nfor inp in inps:\n inp.style = {\"color\": \"blue\"}\n \n# Changin the color of the output cells\nouts = body.get(selector = \".output_wrapper\")\nfor out in outs:\n out.style = {\"backgroundColor\": \"#E0E0E0\"}\n \n# Changing the font of the text cells\ntext_cells = body.get(selector = \".text_cell\")\nfor cell in text_cells:\n cell.style = {\"fontFamily\": \"\"\"\"Courier New\", Courier, monospace\"\"\",\n \"fontSize\": \"20px\"}\n \n# Changing the color of the code cells.\ncode_cells = body.get(selector = \".CodeMirror\")\nfor cell in code_cells:\n cell.style = {\"backgroundColor\": \"#D0D0D0\"}",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a17c8862d1370179bfe9f3611c32d7dee9a21df
| 61,349 |
ipynb
|
Jupyter Notebook
|
notebook/IL/BC_shiftable.ipynb
|
ShuhuaGao/HybridHEMS
|
2037241b47bf3cd68d04e496d338bb2a133c2886
|
[
"MIT"
] | null | null | null |
notebook/IL/BC_shiftable.ipynb
|
ShuhuaGao/HybridHEMS
|
2037241b47bf3cd68d04e496d338bb2a133c2886
|
[
"MIT"
] | null | null | null |
notebook/IL/BC_shiftable.ipynb
|
ShuhuaGao/HybridHEMS
|
2037241b47bf3cd68d04e496d338bb2a133c2886
|
[
"MIT"
] | null | null | null | 118.206166 | 42,218 | 0.667199 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a17cb4c24e18f9e5da00349736d9a30897cbab7
| 121,947 |
ipynb
|
Jupyter Notebook
|
GenerativeAdversarialNetwork.ipynb
|
arayabrain/ELSIWinterSchool
|
491427310cb9fe88d887bdc0a8fcb093e9519702
|
[
"MIT"
] | 1 |
2021-07-07T07:27:46.000Z
|
2021-07-07T07:27:46.000Z
|
GenerativeAdversarialNetwork.ipynb
|
arayabrain/ELSIWinterSchool
|
491427310cb9fe88d887bdc0a8fcb093e9519702
|
[
"MIT"
] | null | null | null |
GenerativeAdversarialNetwork.ipynb
|
arayabrain/ELSIWinterSchool
|
491427310cb9fe88d887bdc0a8fcb093e9519702
|
[
"MIT"
] | null | null | null | 290.35 | 64,940 | 0.899645 |
[
[
[
"%pylab inline\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# PyTorch imports\nimport torch\n\n# This has neural network layer primitives that you can use to build things quickly\nimport torch.nn as nn\n\n# This has things like activation functions and other useful nonlinearities\nfrom torch.nn import functional as F\n\n# This has various gradient descent algorithms\nimport torch.optim\n\n# In order to take derivatives, we have to wrap things as a Variable or a Parameter.\n# Variables are things like inputs to the model\n# Parameters are things like weights\n# If you make a child class of nn.Module, it automatically keeps tracks of all parameters declared during \n# __init__ for you - really handy!\nfrom torch.autograd import Variable\nfrom torch.nn import Parameter\n\nfrom IPython import display\nimport time",
"Populating the interactive namespace from numpy and matplotlib\n"
]
],
[
[
"## Generative Adversarial Networks\n\nGenerative adversarial networks (GANs) are a method to learn to produce samples from high-dimensional distributions based only on a set of samples from that distribution. The basic idea is that you have two networks which are competing with eachother on a shared game. One network (the Generator) must create samples from the target distribution, while the other network (the Discriminator) must correctly predict whether a given sample came from the Generator or from the actual data set.\n\nFor this game, the Nash equilibrium is for the Generator to produce samples exactly according to the probability density of the data distribution, and for the Discriminator to return the probability density of a given input sample. So a trained GAN in principle gives you both a way to sample from a distribution as well as a way to evaluate the local probability density around a sample.\n\nIn practice, the Generator and Discriminator may not converge to the Nash equilibrium, but will often oscillate around it, overspecialize to sub-regions of the distribution ('mode collapse'), etc. As such, there are a large family of algorithms designed to improve the convergence properties of the basic setup. \n\nIn this example, we'll just implement a basic GAN to reproduce some 2d distributions (so that the quality of the reconstruction can be easily checked). ",
"_____no_output_____"
]
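,
[
"As a quick aside on why that is: for a fixed Generator with density $p_g(x)$, the Discriminator maximizes\n\n$$\\int \\left[ p_{data}(x) \\log D(x) + p_g(x) \\log(1 - D(x)) \\right] dx,$$\n\nand maximizing the integrand pointwise gives\n\n$$D^*(x) = \\frac{p_{data}(x)}{p_{data}(x) + p_g(x)}.$$\n\nAt the Nash equilibrium $p_g = p_{data}$, and this reduces to $D^* = 1/2$ everywhere.",
"_____no_output_____"
]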
],
[
[
"# Some utility functions\n\ndef toFloatVar(x):\n return Variable(torch.FloatTensor(x), requires_grad=False)\n\ndef toLongVar(x):\n return Variable(torch.LongTensor(x), requires_grad=False)",
"_____no_output_____"
]
],
[
[
"## Generator network\n\nFirst we'll specify the Generator. This network needs to produce a distribution of outcomes, not just an input-output relationship or single output, so we need to provide it a source of noise that it will transform into the target distribution. In essence, the Generator implements a transform from one probability distribution $p(z)$ to a target distribution (in a different set of variables) $q(x)$ - one sample at a time.\n\nSo basically the procedure is, we sample a random $z$ from $p(z)$ (which will just be a high-dimensional Gaussian), then apply the network to get $x = G(z)$.",
"_____no_output_____"
]
],
[
[
"class Generator(nn.Module):\n def __init__(self, noiseDimension = 16, hiddenDimension = 64, targetDimension = 2):\n super(Generator,self).__init__()\n \n self.layer1 = nn.Linear(noiseDimension, hiddenDimension)\n self.layer2 = nn.Linear(hiddenDimension, hiddenDimension)\n self.layer3 = nn.Linear(hiddenDimension, hiddenDimension)\n self.layer4 = nn.Linear(hiddenDimension, targetDimension)\n \n self.noiseDimension = noiseDimension\n # Each network will have its own optimizer, so we can train them at cross purposes to each-other\n self.optimizer = torch.optim.Adam(self.parameters(), lr = 1e-3)\n \n # For forward, we want to get samples based on specific values of the noise input\n def forward(self, x):\n z = F.relu(self.layer1(x))\n z = F.relu(self.layer2(z))\n z = F.relu(self.layer3(z))\n z = self.layer4(z)\n \n return z\n \n # For convenience, lets also make a function that generates a batch of random samples\n def sample(self, N=100):\n z = toFloatVar(np.random.randn(N, self.noiseDimension))\n return self.forward(z)",
"_____no_output_____"
]
],
[
[
"## Discriminator Network\n\nThe Discriminator network takes a sample either from the true dataset or from fakes made by the Generator, and should return a probability that the sample is real or fake. ",
"_____no_output_____"
]
],
[
[
"class Discriminator(nn.Module):\n def __init__(self, hiddenDimension = 64, targetDimension = 2):\n super(Discriminator,self).__init__()\n \n self.layer1 = nn.Linear(targetDimension, hiddenDimension)\n self.layer2 = nn.Linear(hiddenDimension, hiddenDimension)\n self.layer3 = nn.Linear(hiddenDimension, hiddenDimension)\n self.layer4 = nn.Linear(hiddenDimension, 1)\n \n # Each network will have its own optimizer, so we can train them at cross purposes to each-other\n self.optimizer = torch.optim.Adam(self.parameters(), lr = 1e-3)\n \n def forward(self, x):\n z = F.relu(self.layer1(x))\n z = F.relu(self.layer2(z))\n z = F.relu(self.layer3(z))\n \n # Clamp for numerical stability\n z = torch.clamp( F.sigmoid(self.layer4(z)), 1e-6, 1-1e-6)\n \n return z",
"_____no_output_____"
]
],
[
[
"## Training\n\nThe training procedure involves two steps: training the Discriminator and training the Generator. We'll do these separately for clarity, despite that introducing a bit of redundancy.\n\nTraining the discriminator:\n- Form a batch which contains 50% samples from true distribution and 50% samples from the generator\n- If $D()$ is the output of the discriminator and $x$ the true data, minimize the logistic loss: $L = -\\log(D(x)) - \\log(1-D(G(z)))$\n- Update the discriminator weights only\n\nTraining the generator:\n- Form a batch containing 100% samples from the generator\n- Apply the discriminator to get $D(G(z))$\n- Update the generator to maximize the discriminator's loss: $L = \\log(1-D(G(z)))$.",
"_____no_output_____"
]
],
[
[
"def trainDiscriminator(data, generator, discriminator):\n fakes = generator.sample(N=data.shape[0])\n \n # Zero the discriminator gradient\n discriminator.zero_grad()\n \n # Get the fake batch and true batch\n p_fakes = discriminator.forward(fakes)\n p_true = discriminator.forward(data)\n \n # Compute the loss\n loss = torch.mean(-torch.log(p_true)) + torch.mean(-torch.log(1-p_fakes))\n \n # Update the discriminator weights only\n loss.backward()\n discriminator.optimizer.step()\n \n # Get the loss to follow training progress\n return loss.data.numpy().copy()\n\n# Training the generator doesn't require access to the dataset\n# Careful though - training to completion on a fixed discriminator leads to mode collapse\n# We have to train them together dynamically\n\ndef trainGenerator(generator, discriminator):\n # Zero generator gradient\n generator.zero_grad()\n \n fakes = generator.sample(N=250)\n p_fakes = discriminator.forward(fakes)\n \n # Get the generator loss\n loss = torch.mean(torch.log(1-p_fakes))\n \n # Update generator weights\n loss.backward()\n generator.optimizer.step()\n \n # Track generator loss for training\n return loss.data.numpy().copy()",
"_____no_output_____"
]
],
[
[
"## Data distribution\n\nWe'll learn a simple bimodal distribution to test the GAN",
"_____no_output_____"
]
],
[
[
"def generateData(N):\n # Generate which mode we're in\n x = np.random.randint(2,size=(N,1))\n \n # Generate Gaussian fluctuations around the mode\n z = np.random.randn(N,2)*0.5\n \n # Centers of the two modes\n centers = np.array([[-1.5,0.5], [0.6, 1.3]])\n \n return centers[x[:,0]] + z\n\ndata = generateData(250)\n\nplt.scatter(data[:,0],data[:,1])\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Training the GAN",
"_____no_output_____"
]
],
[
[
"generator = Generator()\ndiscriminator = Discriminator()\n\ngen_loss = []\ndisc_loss = []\n\nfor epoch in range(1000):\n # It's often better for the discriminator to be slightly better than the generator for stability\n # So we'll use two steps here\n dl = trainDiscriminator(toFloatVar(data), generator, discriminator)\n dl = trainDiscriminator(toFloatVar(data), generator, discriminator)\n \n gl = trainGenerator(generator, discriminator)\n \n gen_loss.append(gl)\n disc_loss.append(dl)\n \n if epoch%5 == 0:\n samples = generator.sample(N=250)\n\n plt.clf()\n plt.subplot(1,2,1)\n plt.title(\"Generated Distribution\")\n plt.scatter(data[:,0],data[:,1])\n plt.scatter(samples[:,0],samples[:,1])\n plt.xlim(-4,2.5)\n plt.ylim(-1.5,4)\n\n plt.subplot(1,2,2)\n plt.title(\"Training Loss\")\n plt.plot(disc_loss,label=\"Discriminator\")\n plt.plot(gen_loss,label=\"Generator\")\n plt.legend()\n\n plt.gcf().set_size_inches((12,6))\n display.clear_output(wait=True)\n display.display(plt.gcf())\n time.sleep(0.01)",
"_____no_output_____"
]
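,
[
"# A common stability tweak, shown as a sketch (not used in the training loop above):\n# early in training D(G(z)) is near 0, so the generator loss log(1 - D(G(z)))\n# saturates and yields tiny gradients. Minimizing -log(D(G(z))) instead has the\n# same fixed point but much larger gradients when the generator is losing.\ndef trainGeneratorNonSaturating(generator, discriminator):\n    generator.zero_grad()\n\n    fakes = generator.sample(N=250)\n    p_fakes = discriminator.forward(fakes)\n\n    # Non-saturating generator loss\n    loss = torch.mean(-torch.log(p_fakes))\n\n    loss.backward()\n    generator.optimizer.step()\n\n    return loss.data.numpy().copy()",
"_____no_output_____"
]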
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a17def4b7ba7aabb27997fc76b24be632066ada
| 16,922 |
ipynb
|
Jupyter Notebook
|
notebooks/Lab 9 Notebook_krhodes.ipynb
|
krhodes9/geemap-heroku
|
af81d9b8892a55e75d6b2788b241737d4bfb0ae2
|
[
"MIT"
] | null | null | null |
notebooks/Lab 9 Notebook_krhodes.ipynb
|
krhodes9/geemap-heroku
|
af81d9b8892a55e75d6b2788b241737d4bfb0ae2
|
[
"MIT"
] | null | null | null |
notebooks/Lab 9 Notebook_krhodes.ipynb
|
krhodes9/geemap-heroku
|
af81d9b8892a55e75d6b2788b241737d4bfb0ae2
|
[
"MIT"
] | null | null | null | 36.867102 | 848 | 0.504255 |
[
[
[
"## Include the script for your app below. Be sure to include the instructions!",
"_____no_output_____"
]
],
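,
[
"*Instructions (reconstructed from the widget code below — the wording is mine, not the original author's):*\n\n1. Run every cell, then interact with the map: click a county to select it, or check **Use user-drawn AOI** and draw your own region with the map's drawing tools.\n2. Choose a band combo, a year (1984-2020), and a normalized-difference index; the 1st/2nd band dropdowns update automatically, and the threshold and color control the result layer.\n3. Click **Submit** to add the selected Landsat composite and the thresholded index layer to the map and to plot the 1984-2020 area time series. Check **Download chart data** first if you want the chart values as a CSV.",
"_____no_output_____"
]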
[
[
"import os\nimport ee\nimport geemap\nimport ipywidgets as widgets\nfrom bqplot import pyplot as plt\nfrom ipyleaflet import WidgetControl",
"_____no_output_____"
],
[
"ee.Authenticate()\nee.Initialize()",
"_____no_output_____"
],
[
"# Create an interactive map\nMap = geemap.Map(center=[40, -100], zoom=4, add_google_map=False)\nMap.add_basemap('HYBRID')\nMap.add_basemap('ROADMAP')\n\n# Add Earth Engine data\nfc = ee.FeatureCollection('TIGER/2018/Counties')\nMap.addLayer(fc, {}, 'US Counties')\n\nstates = ee.FeatureCollection('TIGER/2018/States')\nMap.addLayer(states, {}, 'US States')\n\nMap",
"_____no_output_____"
],
[
"# Designe interactive widgets\n\nstyle = {'description_width': 'initial'}\n\noutput_widget = widgets.Output(layout={'border': '1px solid black'})\noutput_control = WidgetControl(widget=output_widget, position='bottomright')\nMap.add_control(output_control)\n\nadmin1_widget = widgets.Text(\n description='State:',\n value='Tennessee',\n width=200,\n style=style\n)\n\nadmin2_widget = widgets.Text(\n description='County:',\n value='Knox',\n width=300,\n style=style\n)\n\naoi_widget = widgets.Checkbox(\n value=False,\n description='Use user-drawn AOI',\n style=style\n)\n\ndownload_widget = widgets.Checkbox(\n value=False,\n description='Download chart data',\n style=style\n)\n\ndef aoi_change(change):\n Map.layers = Map.layers[:4] \n Map.user_roi = None\n Map.user_rois = None\n Map.draw_count = 0\n admin1_widget.value = ''\n admin2_widget.value = ''\n output_widget.clear_output()\n \naoi_widget.observe(aoi_change, names='value')\n\nband_combo = widgets.Dropdown(\n description='Band combo:',\n options=['Red/Green/Blue', 'NIR/Red/Green', 'SWIR2/SWIR1/NIR', 'NIR/SWIR1/Red','SWIR2/NIR/Red', \n 'SWIR2/SWIR1/Red', 'SWIR1/NIR/Blue', 'NIR/SWIR1/Blue', 'SWIR2/NIR/Green', 'SWIR1/NIR/Red'],\n value='NIR/Red/Green',\n style=style\n)\n\nyear_widget = widgets.IntSlider(min=1984, max=2020, value=2010, description='Selected year:', width=400, style=style)\n\nfmask_widget = widgets.Checkbox(\n value=True,\n description='Apply fmask(remove cloud, shadow, snow)',\n style=style\n)\n",
"_____no_output_____"
],
[
"# Normalized Satellite Indices: https://www.usna.edu/Users/oceano/pguth/md_help/html/norm_sat.htm\n\nnd_options = ['Vegetation Index (NDVI)', \n 'Water Index (NDWI)',\n 'Modified Water Index (MNDWI)',\n 'Snow Index (NDSI)',\n 'Soil Index (NDSI)',\n 'Burn Ratio (NBR)',\n 'Customized']\nnd_indices = widgets.Dropdown(options=nd_options, value='Modified Water Index (MNDWI)', description='Normalized Difference Indes:', style=style)\n\nfirst_band = widgets.Dropdown(\n description='1st band:',\n options=['Blue', 'Green','Red','NIR', 'SWIR1', 'SWIR2'],\n value='Green',\n style=style\n)\n\nsecond_band = widgets.Dropdown(\n description='2nd band:',\n options=['Blue', 'Green','Red','NIR', 'SWIR1', 'SWIR2'],\n value='SWIR1',\n style=style\n)\n\nnd_threshold = widgets.FloatSlider(\n value=0,\n min=-1,\n max=1,\n step=0.01,\n description='Threshold:',\n orientation='horizontal',\n style=style\n)\n\nnd_color = widgets.ColorPicker(\n concise=False,\n description='Color:',\n value='blue',\n style=style\n)\n\ndef nd_index_change(change):\n if nd_indices.value == 'Vegetation Index (NDVI)':\n first_band.value = 'NIR'\n second_band.value = 'Red'\n elif nd_indices.value == 'Water Index (NDWI)':\n first_band.value = 'NIR'\n second_band.value = 'SWIR1' \n elif nd_indices.value == 'Modified Water Index (MNDWI)':\n first_band.value = 'Green'\n second_band.value = 'SWIR1' \n elif nd_indices.value == 'Snow Index (NDSI)':\n first_band.value = 'Green'\n second_band.value = 'SWIR1'\n elif nd_indices.value == 'Soil Index (NDSI)':\n first_band.value = 'SWIR1'\n second_band.value = 'NIR' \n elif nd_indices.value == 'Burn Ratio (NBR)':\n first_band.value = 'NIR'\n second_band.value = 'SWIR2'\n elif nd_indices.value == 'Customized':\n first_band.value = None\n second_band.value = None\n \nnd_indices.observe(nd_index_change, names='value')\n\nsubmit = widgets.Button(\n description='Submit',\n button_style='primary',\n tooltip='Click me',\n style=style\n)\n\nfull_widget = widgets.VBox([\n widgets.HBox([admin1_widget, admin2_widget, aoi_widget, download_widget]),\n widgets.HBox([band_combo, year_widget, fmask_widget]),\n widgets.HBox([nd_indices, first_band, second_band, nd_threshold, nd_color]),\n submit\n])\n\nfull_widget\n",
"_____no_output_____"
],
[
"# Capture user interaction with the map\n\ndef handle_interaction(**kwargs):\n latlon = kwargs.get('coordinates')\n if kwargs.get('type') == 'click' and not aoi_widget.value:\n Map.default_style = {'cursor': 'wait'}\n xy = ee.Geometry.Point(latlon[::-1])\n selected_fc = fc.filterBounds(xy)\n \n with output_widget:\n output_widget.clear_output()\n \n try:\n feature = selected_fc.first()\n admin2_id = feature.get('NAME').getInfo()\n statefp = feature.get('STATEFP')\n admin1_fc = ee.Feature(states.filter(ee.Filter.eq('STATEFP', statefp)).first()) \n admin1_id = admin1_fc.get('NAME').getInfo()\n admin1_widget.value = admin1_id\n admin2_widget.value = admin2_id\n Map.layers = Map.layers[:4] \n geom = selected_fc.geometry()\n layer_name = admin1_id + '-' + admin2_id\n Map.addLayer(ee.Image().paint(geom, 0, 2), {'palette': 'red'}, layer_name) \n print(layer_name)\n except:\n print('No feature could be found')\n Map.layers = Map.layers[:4] \n \n Map.default_style = {'cursor': 'pointer'}\n else:\n Map.draw_count = 0\n\nMap.on_interaction(handle_interaction)\n",
"_____no_output_____"
],
[
"# Click event handler\n\ndef submit_clicked(b):\n \n with output_widget:\n output_widget.clear_output()\n print('Computing...')\n Map.default_style = {'cursor': 'wait'}\n\n try:\n admin1_id = admin1_widget.value\n admin2_id = admin2_widget.value\n band1 = first_band.value\n band2 = second_band.value\n selected_year = year_widget.value\n threshold = nd_threshold.value\n bands = band_combo.value.split('/')\n apply_fmask = fmask_widget.value\n palette = nd_color.value\n use_aoi = aoi_widget.value\n download = download_widget.value\n \n if use_aoi:\n if Map.user_roi is not None:\n roi = Map.user_roi\n layer_name = 'User drawn AOI'\n geom = roi\n else:\n output_widget.clear_output() \n print('No user AOI could be found.')\n return\n else:\n \n statefp = ee.Feature(states.filter(ee.Filter.eq('NAME', admin1_id)).first()).get('STATEFP')\n roi = fc.filter(ee.Filter.And(ee.Filter.eq('NAME', admin2_id), ee.Filter.eq('STATEFP', statefp)))\n layer_name = admin1_id + '-' + admin2_id\n geom = roi.geometry()\n\n\n Map.layers = Map.layers[:4] \n Map.addLayer(ee.Image().paint(geom, 0, 2), {'palette': 'red'}, layer_name) \n \n images = geemap.landsat_timeseries(roi=roi, start_year=1984, end_year=2020, start_date='01-01', end_date='12-31', apply_fmask=apply_fmask)\n nd_images = images.map(lambda img: img.normalizedDifference([band1, band2]))\n result_images = nd_images.map(lambda img: img.gt(threshold))\n\n selected_image = ee.Image(images.toList(images.size()).get(selected_year - 1984))\n selected_result_image = ee.Image(result_images.toList(result_images.size()).get(selected_year - 1984)).selfMask()\n \n vis_params = {\n 'bands': bands,\n 'min': 0,\n 'max': 3000\n }\n \n Map.addLayer(selected_image, vis_params, 'Landsat ' + str(selected_year))\n Map.addLayer(selected_result_image, {'palette': palette}, 'Result ' + str(selected_year))\n\n \n def cal_area(img):\n pixel_area = img.multiply(ee.Image.pixelArea()).divide(1e4)\n img_area = pixel_area.reduceRegion(**{\n 'geometry': geom,\n 'reducer': ee.Reducer.sum(),\n 'scale': 1000,\n 'maxPixels': 1e12,\n 'bestEffort': True\n })\n return img.set({'area': img_area})\n \n areas = result_images.map(cal_area)\n stats = areas.aggregate_array('area').getInfo()\n x = list(range(1984, 2021))\n y = [item.get('nd') for item in stats]\n \n fig = plt.figure(1)\n fig.layout.height = '270px'\n plt.clear()\n plt.plot(x, y)\n plt.title('Temporal trend (1984-2020)')\n plt.xlabel('Year')\n plt.ylabel('Area (ha)')\n \n output_widget.clear_output() \n\n plt.show()\n \n if download:\n out_dir = os.path.join(os.path.expanduser('~'), 'Downloads')\n out_name = 'chart_' + geemap.random_string() + '.csv'\n out_csv = os.path.join(out_dir, out_name)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n with open(out_csv, 'w') as f:\n f.write('year, area (ha)\\n')\n for index, item in enumerate(x):\n line = '{},{:.2f}\\n'.format(item, y[index])\n f.write(line) \n link = geemap.create_download_link(\n out_csv, title=\"Click here to download the chart data: \")\n display(link)\n \n except Exception as e:\n print(e)\n print('An error occurred during computation.') \n\n Map.default_style = {'cursor': 'default'}\n\nsubmit.on_click(submit_clicked)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a17ee7c7d8182256545142ba6a0ddde08296f3f
| 124,073 |
ipynb
|
Jupyter Notebook
|
docs/source/examples/Widget List.ipynb
|
y1ngyang/ipywidgets
|
4d10dc9abdbfc26b62dcf2d5cd660d4e7a603ae5
|
[
"BSD-3-Clause"
] | null | null | null |
docs/source/examples/Widget List.ipynb
|
y1ngyang/ipywidgets
|
4d10dc9abdbfc26b62dcf2d5cd660d4e7a603ae5
|
[
"BSD-3-Clause"
] | 200 |
2019-02-07T18:19:36.000Z
|
2021-07-29T08:37:12.000Z
|
docs/source/examples/Widget List.ipynb
|
y1ngyang/ipywidgets
|
4d10dc9abdbfc26b62dcf2d5cd660d4e7a603ae5
|
[
"BSD-3-Clause"
] | null | null | null | 36.321136 | 30,763 | 0.662062 |
[
[
[
"# Widget List",
"_____no_output_____"
]
],
[
[
"import ipywidgets as widgets",
"_____no_output_____"
]
],
[
[
"## Numeric widgets",
"_____no_output_____"
],
[
"There are many widgets distributed with IPython that are designed to display numeric values. Widgets exist for displaying integers and floats, both bounded and unbounded. The integer widgets share a similar naming scheme to their floating point counterparts. By replacing `Float` with `Int` in the widget name, you can find the Integer equivalent.",
"_____no_output_____"
],
[
"### IntSlider",
"_____no_output_____"
]
],
[
[
"widgets.IntSlider(\n value=7,\n min=0,\n max=10,\n step=1,\n description='Test:',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='d'\n)",
"_____no_output_____"
]
],
[
[
"### FloatSlider",
"_____no_output_____"
]
],
[
[
"widgets.FloatSlider(\n value=7.5,\n min=0,\n max=10.0,\n step=0.1,\n description='Test:',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.1f',\n)",
"_____no_output_____"
]
],
[
[
"Sliders can also be **displayed vertically**.",
"_____no_output_____"
]
],
[
[
"widgets.FloatSlider(\n value=7.5,\n min=0,\n max=10.0,\n step=0.1,\n description='Test:',\n disabled=False,\n continuous_update=False,\n orientation='vertical',\n readout=True,\n readout_format='.1f',\n)",
"_____no_output_____"
]
],
[
[
"### FloatLogSlider",
"_____no_output_____"
],
[
"The `FloatLogSlider` has a log scale, which makes it easy to have a slider that covers a wide range of positive magnitudes. The `min` and `max` refer to the minimum and maximum exponents of the base, and the `value` refers to the actual value of the slider.",
"_____no_output_____"
]
],
[
[
"widgets.FloatLogSlider(\n value=10,\n base=10,\n min=-10, # max exponent of base\n max=10, # min exponent of base\n step=0.2, # exponent step\n description='Log Slider'\n)",
"_____no_output_____"
]
],
[
[
"### IntRangeSlider",
"_____no_output_____"
]
],
[
[
"widgets.IntRangeSlider(\n value=[5, 7],\n min=0,\n max=10,\n step=1,\n description='Test:',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='d',\n)",
"_____no_output_____"
]
],
[
[
"### FloatRangeSlider",
"_____no_output_____"
]
],
[
[
"widgets.FloatRangeSlider(\n value=[5, 7.5],\n min=0,\n max=10.0,\n step=0.1,\n description='Test:',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.1f',\n)",
"_____no_output_____"
]
],
[
[
"### IntProgress",
"_____no_output_____"
]
],
[
[
"widgets.IntProgress(\n value=7,\n min=0,\n max=10,\n step=1,\n description='Loading:',\n bar_style='', # 'success', 'info', 'warning', 'danger' or ''\n orientation='horizontal'\n)",
"_____no_output_____"
]
],
[
[
"### FloatProgress",
"_____no_output_____"
]
],
[
[
"widgets.FloatProgress(\n value=7.5,\n min=0,\n max=10.0,\n step=0.1,\n description='Loading:',\n bar_style='info',\n orientation='horizontal'\n)",
"_____no_output_____"
]
],
[
[
"The numerical text boxes that impose some limit on the data (range, integer-only) impose that restriction when the user presses enter.\n\n### BoundedIntText",
"_____no_output_____"
]
],
[
[
"widgets.BoundedIntText(\n value=7,\n min=0,\n max=10,\n step=1,\n description='Text:',\n disabled=False\n)",
"_____no_output_____"
]
],
[
[
"### BoundedFloatText",
"_____no_output_____"
]
],
[
[
"widgets.BoundedFloatText(\n value=7.5,\n min=0,\n max=10.0,\n step=0.1,\n description='Text:',\n disabled=False\n)",
"_____no_output_____"
]
],
[
[
"### IntText",
"_____no_output_____"
]
],
[
[
"widgets.IntText(\n value=7,\n description='Any:',\n disabled=False\n)",
"_____no_output_____"
]
],
[
[
"### FloatText",
"_____no_output_____"
]
],
[
[
"widgets.FloatText(\n value=7.5,\n description='Any:',\n disabled=False\n)",
"_____no_output_____"
]
],
[
[
"## Boolean widgets",
"_____no_output_____"
],
[
"There are three widgets that are designed to display a boolean value.",
"_____no_output_____"
],
[
"### ToggleButton",
"_____no_output_____"
]
],
[
[
"widgets.ToggleButton(\n value=False,\n description='Click me',\n disabled=False,\n button_style='', # 'success', 'info', 'warning', 'danger' or ''\n tooltip='Description',\n icon='check'\n)",
"_____no_output_____"
]
],
[
[
"### Checkbox",
"_____no_output_____"
]
],
[
[
"widgets.Checkbox(\n value=False,\n description='Check me',\n disabled=False\n)",
"_____no_output_____"
]
],
[
[
"### Valid\n\nThe valid widget provides a read-only indicator.",
"_____no_output_____"
]
],
[
[
"widgets.Valid(\n value=False,\n description='Valid!',\n)",
"_____no_output_____"
]
],
[
[
"## Selection widgets",
"_____no_output_____"
],
[
"There are several widgets that can be used to display single selection lists, and two that can be used to select multiple values. All inherit from the same base class. You can specify the **enumeration of selectable options by passing a list** (options are either (label, value) pairs, or simply values for which the labels are derived by calling `str`). You can **also specify the enumeration as a dictionary**, in which case the **keys will be used as the item displayed** in the list and the corresponding **value will be used** when an item is selected (in this case, since dictionaries are unordered, the displayed order of items in the widget is unspecified).",
"_____no_output_____"
],
[
"### Dropdown",
"_____no_output_____"
]
],
[
[
"widgets.Dropdown(\n options=['1', '2', '3'],\n value='2',\n description='Number:',\n disabled=False,\n)",
"_____no_output_____"
]
],
[
[
"The following is also valid:",
"_____no_output_____"
]
],
[
[
"widgets.Dropdown(\n options={'One': 1, 'Two': 2, 'Three': 3},\n value=2,\n description='Number:',\n)",
"_____no_output_____"
]
],
[
[
"### RadioButtons",
"_____no_output_____"
]
],
[
[
"widgets.RadioButtons(\n options=['pepperoni', 'pineapple', 'anchovies'],\n# value='pineapple',\n description='Pizza topping:',\n disabled=False\n)",
"_____no_output_____"
]
],
[
[
"### Select",
"_____no_output_____"
]
],
[
[
"widgets.Select(\n options=['Linux', 'Windows', 'OSX'],\n value='OSX',\n # rows=10,\n description='OS:',\n disabled=False\n)",
"_____no_output_____"
]
],
[
[
"### SelectionSlider",
"_____no_output_____"
]
],
[
[
"widgets.SelectionSlider(\n options=['scrambled', 'sunny side up', 'poached', 'over easy'],\n value='sunny side up',\n description='I like my eggs ...',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True\n)",
"_____no_output_____"
]
],
[
[
"### SelectionRangeSlider\n\nThe value, index, and label keys are 2-tuples of the min and max values selected. The options must be nonempty.",
"_____no_output_____"
]
],
[
[
"import datetime\ndates = [datetime.date(2015,i,1) for i in range(1,13)]\noptions = [(i.strftime('%b'), i) for i in dates]\nwidgets.SelectionRangeSlider(\n options=options,\n index=(0,11),\n description='Months (2015)',\n disabled=False\n)",
"_____no_output_____"
]
],
[
[
"### ToggleButtons",
"_____no_output_____"
]
],
[
[
"widgets.ToggleButtons(\n options=['Slow', 'Regular', 'Fast'],\n description='Speed:',\n disabled=False,\n button_style='', # 'success', 'info', 'warning', 'danger' or ''\n tooltips=['Description of slow', 'Description of regular', 'Description of fast'],\n# icons=['check'] * 3\n)",
"_____no_output_____"
]
],
[
[
"### SelectMultiple\nMultiple values can be selected with <kbd>shift</kbd> and/or <kbd>ctrl</kbd> (or <kbd>command</kbd>) pressed and mouse clicks or arrow keys.",
"_____no_output_____"
]
],
[
[
"widgets.SelectMultiple(\n options=['Apples', 'Oranges', 'Pears'],\n value=['Oranges'],\n #rows=10,\n description='Fruits',\n disabled=False\n)",
"_____no_output_____"
]
],
[
[
"## String widgets",
"_____no_output_____"
],
[
"There are several widgets that can be used to display a string value. The `Text` and `Textarea` widgets accept input. The `HTML` and `HTMLMath` widgets display a string as HTML (`HTMLMath` also renders math). The `Label` widget can be used to construct a custom control label.",
"_____no_output_____"
],
[
"### Text",
"_____no_output_____"
]
],
[
[
"widgets.Text(\n value='Hello World',\n placeholder='Type something',\n description='String:',\n disabled=False \n)",
"_____no_output_____"
]
],
[
[
"### Textarea",
"_____no_output_____"
]
],
[
[
"widgets.Textarea(\n value='Hello World',\n placeholder='Type something',\n description='String:',\n disabled=False\n)",
"_____no_output_____"
]
],
[
[
"### Label\n\nThe `Label` widget is useful if you need to build a custom description next to a control using similar styling to the built-in control descriptions.",
"_____no_output_____"
]
],
[
[
"widgets.HBox([widgets.Label(value=\"The $m$ in $E=mc^2$:\"), widgets.FloatSlider()])",
"_____no_output_____"
]
],
[
[
"### HTML",
"_____no_output_____"
]
],
[
[
"widgets.HTML(\n value=\"Hello <b>World</b>\",\n placeholder='Some HTML',\n description='Some HTML',\n)",
"_____no_output_____"
]
],
[
[
"### HTML Math",
"_____no_output_____"
]
],
[
[
"widgets.HTMLMath(\n value=r\"Some math and <i>HTML</i>: \\(x^2\\) and $$\\frac{x+1}{x-1}$$\",\n placeholder='Some HTML',\n description='Some HTML',\n)",
"_____no_output_____"
]
],
[
[
"## Image",
"_____no_output_____"
]
],
[
[
"file = open(\"images/WidgetArch.png\", \"rb\")\nimage = file.read()\nwidgets.Image(\n value=image,\n format='png',\n width=300,\n height=400,\n)",
"_____no_output_____"
]
],
[
[
"## Button",
"_____no_output_____"
]
],
[
[
"widgets.Button(\n description='Click me',\n disabled=False,\n button_style='', # 'success', 'info', 'warning', 'danger' or ''\n tooltip='Click me',\n icon='check'\n)",
"_____no_output_____"
]
],
[
[
"## Output\n\nThe `Output` widget can capture and display stdout, stderr and [rich output generated by IPython](http://ipython.readthedocs.io/en/stable/api/generated/IPython.display.html#module-IPython.display). For detailed documentation, see the [output widget examples](/examples/Output Widget.html).",
"_____no_output_____"
],
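[
"For example, a minimal usage sketch (the `out` name is our own, not from these docs):\n\n    out = widgets.Output()\n    with out:\n        print('This text is captured and shown in the widget')\n    out",
"_____no_output_____"
],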
[
"## Play (Animation) widget",
"_____no_output_____"
],
[
"The `Play` widget is useful to perform animations by iterating on a sequence of integers with a certain speed. The value of the slider below is linked to the player.",
"_____no_output_____"
]
],
[
[
"play = widgets.Play(\n# interval=10,\n value=50,\n min=0,\n max=100,\n step=1,\n description=\"Press play\",\n disabled=False\n)\nslider = widgets.IntSlider()\nwidgets.jslink((play, 'value'), (slider, 'value'))\nwidgets.HBox([play, slider])",
"_____no_output_____"
]
],
[
[
"## Date picker\n\nThe date picker widget works in Chrome and IE Edge, but does not currently work in Firefox or Safari because they do not support the HTML date input field.",
"_____no_output_____"
]
],
[
[
"widgets.DatePicker(\n description='Pick a Date',\n disabled=False\n)",
"_____no_output_____"
]
],
[
[
"## Color picker",
"_____no_output_____"
]
],
[
[
"widgets.ColorPicker(\n concise=False,\n description='Pick a color',\n value='blue',\n disabled=False\n)",
"_____no_output_____"
]
],
[
[
"## Controller\n\nThe `Controller` allows a game controller to be used as an input device.",
"_____no_output_____"
]
],
[
[
"widgets.Controller(\n index=0,\n)",
"_____no_output_____"
]
],
[
[
"## Container/Layout widgets\n\nThese widgets are used to hold other widgets, called children. Each has a `children` property that may be set either when the widget is created or later.",
"_____no_output_____"
],
[
"### Box",
"_____no_output_____"
]
],
[
[
"items = [widgets.Label(str(i)) for i in range(4)]\nwidgets.Box(items)",
"_____no_output_____"
]
],
[
[
"### HBox",
"_____no_output_____"
]
],
[
[
"items = [widgets.Label(str(i)) for i in range(4)]\nwidgets.HBox(items)",
"_____no_output_____"
]
],
[
[
"### VBox",
"_____no_output_____"
]
],
[
[
"items = [widgets.Label(str(i)) for i in range(4)]\nleft_box = widgets.VBox([items[0], items[1]])\nright_box = widgets.VBox([items[2], items[3]])\nwidgets.HBox([left_box, right_box])",
"_____no_output_____"
]
],
[
[
"### Accordion",
"_____no_output_____"
]
],
[
[
"accordion = widgets.Accordion(children=[widgets.IntSlider(), widgets.Text()])\naccordion.set_title(0, 'Slider')\naccordion.set_title(1, 'Text')\naccordion",
"_____no_output_____"
]
],
[
[
"### Tabs\n\nIn this example the children are set after the tab is created. Titles for the tabes are set in the same way they are for `Accordion`.",
"_____no_output_____"
]
],
[
[
"tab_contents = ['P0', 'P1', 'P2', 'P3', 'P4']\nchildren = [widgets.Text(description=name) for name in tab_contents]\ntab = widgets.Tab()\ntab.children = children\nfor i in range(len(children)):\n tab.set_title(i, str(i))\ntab",
"_____no_output_____"
]
],
[
[
"### Accordion and Tab use `selected_index`, not value\n\nUnlike the rest of the widgets discussed earlier, the container widgets `Accordion` and `Tab` update their `selected_index` attribute when the user changes which accordion or tab is selected. That means that you can both see what the user is doing *and* programmatically set what the user sees by setting the value of `selected_index`.\n\nSetting `selected_index = None` closes all of the accordions or deselects all tabs.",
"_____no_output_____"
],
[
"In the cells below try displaying or setting the `selected_index` of the `tab` and/or `accordion`.",
"_____no_output_____"
]
],
[
[
"tab.selected_index = 3",
"_____no_output_____"
],
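[
"# A small sketch (the handler name is our own): observing which tab the user selects,\n# i.e. the \"see what the user is doing\" half described above.\ndef on_selected_index_change(change):\n    print('selected_index changed from', change['old'], 'to', change['new'])\n\ntab.observe(on_selected_index_change, names='selected_index')",
"_____no_output_____"
],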
[
"accordion.selected_index = None",
"_____no_output_____"
]
],
[
[
"### Nesting tabs and accordions\n\nTabs and accordions can be nested as deeply as you want. If you have a few minutes, try nesting a few accordions or putting an accordion inside a tab or a tab inside an accordion. \n\nThe example below makes a couple of tabs with an accordion children in one of them",
"_____no_output_____"
]
],
[
[
"tab_nest = widgets.Tab()\ntab_nest.children = [accordion, accordion]\ntab_nest.set_title(0, 'An accordion')\ntab_nest.set_title(1, 'Copy of the accordion')\ntab_nest",
"_____no_output_____"
]
],
[
[
"[Index](Index.ipynb) - [Back](Widget Basics.ipynb) - [Next](Output Widget.ipynb)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
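[
"code"
],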
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a17f53d1c5f5bc8df9de247fe5e5d506085c654
| 1,768 |
ipynb
|
Jupyter Notebook
|
notebooks/kernels/kernels.ipynb
|
etarakci-hvl/pyprobml
|
a3fe8086844ae0885e3f21d30be5f2e6448cdeba
|
[
"MIT"
] | 1 |
2020-11-08T17:03:15.000Z
|
2020-11-08T17:03:15.000Z
|
notebooks/kernels/kernels.ipynb
|
etarakci-hvl/pyprobml
|
a3fe8086844ae0885e3f21d30be5f2e6448cdeba
|
[
"MIT"
] | null | null | null |
notebooks/kernels/kernels.ipynb
|
etarakci-hvl/pyprobml
|
a3fe8086844ae0885e3f21d30be5f2e6448cdeba
|
[
"MIT"
] | null | null | null | 26.787879 | 359 | 0.526018 |
[
[
[
"<a href=\"https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/kernels/kernels.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"# Other kernel methods\n\n[Gaussian processes](https://colab.sandbox.google.com/github/probml/pyprobml/blob/master/notebooks/gp/gp.ipynb) use positive definite (Mercer) kernels to define priors and posteriors over functions. However, we can also use such kernels to define function spaces for use in non-Bayesian applications. In this notebook, we give some examples of this.",
"_____no_output_____"
]
],
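[
[
"# A minimal non-Bayesian example of the idea above (a sketch of our own, assuming\n# scikit-learn is available): kernel ridge regression with an RBF kernel.\nimport numpy as np\nfrom sklearn.kernel_ridge import KernelRidge\n\nrng = np.random.RandomState(0)\nX = rng.uniform(-3, 3, size=(50, 1))\ny = np.sin(X).ravel() + 0.1 * rng.randn(50)\n\nmodel = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.5)\nmodel.fit(X, y)\nprint(model.predict(X[:5]))",
"_____no_output_____"
]
],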
[
[
"",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
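[
"code"
],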
[
"code"
]
] |
4a17fea68ae82253a0de4f861b6d542a85c2d630
| 25,455 |
ipynb
|
Jupyter Notebook
|
tutorials/advanced/BRAT_Annotations.ipynb
|
dhimmel/snorkel
|
a93cea9e6bf0af436291d3c633a44e48199d5c67
|
[
"Apache-2.0"
] | 30 |
2019-08-22T19:27:59.000Z
|
2022-03-13T22:03:15.000Z
|
tutorials/advanced/BRAT_Annotations.ipynb
|
jasontlam/snorkel
|
093b752cf8656f5b0d8f8e73072fedfcfd68b1de
|
[
"Apache-2.0"
] | 2 |
2019-08-22T16:51:58.000Z
|
2022-03-21T02:59:18.000Z
|
tutorials/advanced/BRAT_Annotations.ipynb
|
jasontlam/snorkel
|
093b752cf8656f5b0d8f8e73072fedfcfd68b1de
|
[
"Apache-2.0"
] | 31 |
2019-08-22T19:28:08.000Z
|
2022-03-23T12:50:49.000Z
| 39.22188 | 5,618 | 0.662817 |
[
[
[
"# Advanced Tutorial: Creating Gold Annotation Labels with BRAT\n\nThis is a short tutorial on how to use BRAT (Brat Rapid Annotation Tool), an\nonline environment for collaborative text annotation. \n\nhttp://brat.nlplab.org/\n",
"_____no_output_____"
]
],
[
[
"%load_ext autoreload\n%autoreload 2\n%matplotlib inline\nimport os\n\n# TO USE A DATABASE OTHER THAN SQLITE, USE THIS LINE\n# Note that this is necessary for parallel execution amongst other things...\n# os.environ['SNORKELDB'] = 'postgres:///snorkel-intro'\n\nfrom snorkel import SnorkelSession\nsession = SnorkelSession()",
"_____no_output_____"
]
],
[
[
"## Step 1: Define a `Candidate` Type\n\nWe repeat our definition of the `Spouse` `Candidate` subclass from Parts II and III.",
"_____no_output_____"
]
],
[
[
"from snorkel.models import candidate_subclass, Document, Candidate\n\nSpouse = candidate_subclass('Spouse', ['person1', 'person2'])",
"_____no_output_____"
]
],
[
[
"### a) Select an example `Candidate` and `Document` \n\nCandidates are divided into 3 splits, each mapped to a unique integer id: \n- 0: _training_ \n- 1: _development_ \n- 2: _testing_ \n\nIn this tutorial, we'll load our training set candidates and create gold labels for a document using the BRAT interface",
"_____no_output_____"
],
[
"## Step 2: Launching BRAT\nBRAT runs as as seperate server application. Snorkel will automatically download and configure a BRAT instance for you. When you first initialize this server, you need to provide your applications `Candidate` type. For this tutorial, we use the `Spouse` relation defined above, which consists of a pair of `PERSON` named entities connected by marriage. \n\nCurrently, we only support 1 relation type per-application. ",
"_____no_output_____"
]
],
[
[
"from snorkel.contrib.brat import BratAnnotator\n\nbrat = BratAnnotator(session, Spouse, encoding='utf-8')",
"Downloading BRAT [http://weaver.nlplab.org/~brat/releases/brat-v1.3_Crunchy_Frog.tar.gz]...\nInstalling BRAT...\nLaunching BRAT server at http://localhost:8001 [pid=44647]...\n"
]
],
[
[
"### a) Initialize our document collection\n\nBRAT creates a local copy of all the documents and annotations found in a `split` set. We initialize a document collection by defining a unique set name, _spouse/train_, and then passing in our training set candidates via the `split` id. Annotations are stored as plain text files in [standoff](http://brat.nlplab.org/standoff.html) format.\n\n<img align=\"left\" src=\"imgs/brat-login.jpg\" width=\"200px\" style=\"margin-right:50px\">\n\nAfter launching the BRAT annotator for the first time, you will need to login to begin editing annotations. Navigate your mouse to the upper right-hand corner of the BRAT interface (see Fig. 1) click 'login' and enter the following information:\n\n- **login**: _brat_\n- **password**: _brat_",
"_____no_output_____"
],
[
"Advanced BRAT users can setup multiple annotator accounts by adding USER/PASSWORD key pairs to the `USER_PASSWORD` dictionary found in `snokel/contrib/brat/brat-v1.3_Crunchy_Frog/config.py`. This is useful if you would like to keep track of multiple annotator judgements for later adjudication or use as labeling functions as per our tutorial on using [Snorkel for Crowdsourcing](https://github.com/HazyResearch/snorkel/blob/master/tutorials/crowdsourcing/Crowdsourced_Sentiment_Analysis.ipynb).",
"_____no_output_____"
]
],
[
[
"brat.init_collection(\"spouse/train\", split=0)",
"_____no_output_____"
]
],
[
[
"We've already generated some BRAT annotations for you, so let's import an existing collection for purposes of this tutorial.",
"_____no_output_____"
]
],
[
[
"brat.import_collection(\"data/brat_spouse.zip\", overwrite=True)",
"Imported archive to /Users/fries/code/workshop/snorkel/snorkel/contrib/brat/brat-v1.3_Crunchy_Frog/data/\n"
]
],
[
[
"### b) Launch BRAT Interface in a New Window\nOnce our collection is initialized, we can view specific documents for annotation. The default mode is to generate a HTML link to a new BRAT browser window. Click this link to connect to launch the annotator editor. \n\nOptionally, you can launch BRAT in an embedded window by calling:\n\n brat.view(\"spouse/train\", doc, new_window=False)",
"_____no_output_____"
]
],
[
[
"doc_name = '5ede8912-59c9-4ba9-93df-c58cebb542b7'\ndoc = session.query(Document).filter(Document.name==doc_name).one()\n\nbrat.view(\"spouse/train\", doc)",
"_____no_output_____"
]
],
[
[
"If you do not have a specific document to edit, you can optionally launch BRAT and use their file browser to navigate through all files found in the target collection.",
"_____no_output_____"
]
],
[
[
"brat.view(\"spouse/train\")",
"_____no_output_____"
]
],
[
[
"## Step 3: Creating Gold Label Annotations\n### a) Annotating Named Entities\n`Spouse` relations consist of 2 `PERSON` named entities. When annotating our validation documents, \nthe first task is to identify our target entities. In this tutorial, we will annotate all `PERSON` \nmentions found in our example document, though for your application you may choose to only label \nthose that particpate in a true relation. \n\n<img align=\"right\" src=\"imgs/brat-anno-dialog.jpg\" width=\"400px\" style=\"margin-left:50px\">\n\nBegin by selecting and highlighting the text corresponding to a `PERSON` entity. Once highlighted, an annotation dialog will appear on your screen (see image of the BRAT Annotation Dialog Window to the right). If this is correct, click ok. Repeat this for every entity you find in the document.\n\n**Annotation Guidelines**\n\nWhen developing gold label annotations, you should always discuss and agree on a set of _annotator guidelines_ to share with human labelers. These are the guidelines we used to label the `Spouse` relation:\n\n- **<span style=\"color:red\">Do not</span>** include formal titles associated with professional roles e.g., _**Pastor** Jeff_, _**Prime Minister** Prayut Chan-O-Cha_\n- Do include English honorifics unrelated to a professional role, e.g., _**Mr.** John Cleese_.\n- **<span style=\"color:red\">Do not</span>** include family names/surnames that do not reference a single individual, e.g., _the Duggar family_.\n- Do include informal titles, stage names, fictional characters, and nicknames, e.g., _**Dog the Bounty Hunter**_\n- Include possessive's, e.g., _Anna**'s**_.",
"_____no_output_____"
],
[
"### b) Annotating Relations\n\nTo annotate `Spouse` relations, we look through all pairs of `PERSON` entities found within a single sentence. BRAT identifies the bounds of each sentence and renders a numbered row in the annotation window (see the left-most column in the image below). \n\n<img align=\"right\" src=\"imgs/brat-relation.jpg\" width=\"500px\" style=\"margin-left:50px\">\n\nAnnotating relations is done through simple drag and drop. Begin by clicking and holding on a single `PERSON` entity and then drag that entity to its corresponding spouse entity. That is it!\n\n**Annotation Guidelines**\n\n- Restrict `PERSON` pairs to those found in the same sentence.\n- The order of `PERSON` arguments does not matter in this application.\n- **<span style=\"color:red\">Do not</span>** include relations where a `PERSON` argument is wrong or otherwise incomplete.",
"_____no_output_____"
],
[
"## Step 4: Scoring Models using BRAT Labels\n\n### a) Evaluating System Recall\n\nCreating gold validation data with BRAT is a critical evaluation step because it allows us to compute an estimate of our model's _true recall_. When we create labeled data over a candidate set created by Snorkel, we miss mentions of relations that our candidate extraction step misses. This causes us to overestimate the system's true recall.\n\nIn the code below, we show how to map BRAT annotations to an existing set of Snorkel candidates and compute some associated metrics. ",
"_____no_output_____"
]
],
[
[
"train_cands = session.query(Spouse).filter(Spouse.split == 0).order_by(Spouse.id).all()",
"_____no_output_____"
]
],
[
[
"### b) Mapping BRAT Annotations to Snorkel Candidates\nWe annotated a single document using BRAT to illustrate the difference in scores when we factor in the effects of candidate generation. ",
"_____no_output_____"
]
],
[
[
"%time brat.import_gold_labels(session, \"spouse/train\", train_cands)",
"Mapped 7/14 (50%) of BRAT labels to candidates\n"
]
],
[
[
"Our candidate extractor only captures 7/14 (50%) of true mentions in this document. Our real system's recall is likely even worse, since we won't correctly predict the label for all true candidates. ",
"_____no_output_____"
],
[
"### c) Re-loading the Trained LSTM\nWe'll load the LSTM model we trained in [Workshop_4_Discriminative_Model_Training.ipynb](Workshop_4_Discriminative_Model_Training.ipynb) and use to to predict marginals for our test candidates.",
"_____no_output_____"
]
],
[
[
"test_cands = session.query(Spouse).filter(Spouse.split == 2).order_by(Spouse.id).all()",
"_____no_output_____"
],
[
"from snorkel.learning.disc_models.rnn import reRNN\n\nlstm = reRNN(seed=1701, n_threads=None)\nlstm.load(\"spouse.lstm\")",
"INFO:tensorflow:Restoring parameters from checkpoints/spouse.lstm/spouse.lstm-0\n[reRNN] Loaded model <spouse.lstm>\n"
],
[
"marginals = lstm.marginals(test_cands)",
"_____no_output_____"
]
],
[
[
"### d) Create a Subset of Test for Evaluation\n\nOur measures assume BRAT annotations are complete for the given set of documents! Rather than manually annotating the entire test set, we define a small subset of 10 test documents for hand lableing. We'll then compute the full, recall-corrected metrics for this subset.\n\nFirst, let's build a query to initalize this candidate collection.",
"_____no_output_____"
]
],
[
[
"doc_ids = set(open(\"data/brat_test_docs.tsv\",\"rb\").read().splitlines())\ncid_query = [c.id for c in test_cands if c.get_parent().document.name in doc_ids]\n\nbrat.init_collection(\"spouse/test-subset\", cid_query=cid_query)",
"Error! Collection at 'spouse/test-subset' already exists. Please set overwrite=True to erase all existing annotations.\n"
],
[
"brat.view(\"spouse/test-subset\")",
"_____no_output_____"
]
],
[
[
"### e) Comparing Unadjusted vs. Adjusted Scores ",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nplt.hist(marginals, bins=20)\nplt.show()",
"_____no_output_____"
],
[
"from snorkel.annotations import load_gold_labels\n\nL_gold_dev = load_gold_labels(session, annotator_name='gold', split=1, load_as_array=True, zero_one=True)\nL_gold_test = load_gold_labels(session, annotator_name='gold', split=2, zero_one=True)",
"_____no_output_____"
]
],
[
[
"**Recall-uncorrected Score** If we don't account for candidates missed during extraction, our model score will overestimate real performance, as is the case for the model evaluation below.",
"_____no_output_____"
]
],
[
[
"brat.score(session, test_cands, marginals, \"spouse/test-subset\", recall_correction=False)",
"========================================\nUnadjusted BRAT Scores (10 Documents)\n========================================\nPos. class accuracy: 0.75\nNeg. class accuracy: 0.852\nPrecision 0.25\nRecall 0.75\nF1 0.375\n----------------------------------------\nTP: 6 | FP: 18 | TN: 104 | FN: 2\n========================================\n\n"
]
],
[
[
"**Recall-corrected Score** Though this is a small sample of documents, we see how missing candidates can impact our real system score. ",
"_____no_output_____"
]
],
[
[
"brat.score(session, test_cands, marginals, \"spouse/test-subset\")",
"========================================\nAdjusted BRAT Scores (10 Documents)\n========================================\nPos. class accuracy: 0.545\nNeg. class accuracy: 0.852\nPrecision 0.25\nRecall 0.545\nF1 0.343\n----------------------------------------\nTP: 6 | FP: 18 | TN: 104 | FN: 5\n========================================\n\n"
]
],
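[
[
"# Sanity check (our own addition): the adjusted recall above follows from counting the\n# 3 true mentions missed at candidate extraction as extra false negatives (FN: 2 -> 5).\ntp, fn, missed = 6, 2, 3\nprint(tp / (tp + fn))           # 0.75  -> unadjusted recall\nprint(tp / (tp + fn + missed))  # ~0.545 -> recall-corrected recall",
"_____no_output_____"
]
],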
[
[
"This is the full model, evaluated on all our gold candidate labels. ",
"_____no_output_____"
]
],
[
[
"tp, fp, tn, fn = lstm.error_analysis(session, test_cands, L_gold_test)",
"========================================\nScores (Un-adjusted)\n========================================\nPos. class accuracy: 0.627\nNeg. class accuracy: 0.923\nPrecision 0.426\nRecall 0.627\nF1 0.507\n----------------------------------------\nTP: 281 | FP: 379 | TN: 4567 | FN: 167\n========================================\n\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
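[
"code"
],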
[
"markdown"
],
[
"code"
]
] |
4a18048f6a3d952664b1fb8be1c9b2b5c9319fa4
| 119,855 |
ipynb
|
Jupyter Notebook
|
test.ipynb
|
ugo-nama-kun/cmc_vision
|
c61f2a672613335928c9043a3de20f8bd21e54f1
|
[
"MIT"
] | null | null | null |
test.ipynb
|
ugo-nama-kun/cmc_vision
|
c61f2a672613335928c9043a3de20f8bd21e54f1
|
[
"MIT"
] | null | null | null |
test.ipynb
|
ugo-nama-kun/cmc_vision
|
c61f2a672613335928c9043a3de20f8bd21e54f1
|
[
"MIT"
] | null | null | null | 363.19697 | 59,719 | 0.919937 |
[
[
[
"import copy\nimport numpy as np\n\nfrom dm_control import suite\n\nimport matplotlib\nimport matplotlib.animation as animation\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"def display_video(frames, framerate=30):\n height, width, _ = frames[0].shape\n dpi = 70\n orig_backend = matplotlib.get_backend()\n matplotlib.use('Agg') # Switch to headless 'Agg' to inhibit figure rendering.\n fig, ax = plt.subplots(1, 1, figsize=(width / dpi, height / dpi), dpi=dpi)\n matplotlib.use(orig_backend) # Switch back to the original backend.\n ax.set_axis_off()\n ax.set_aspect('equal')\n ax.set_position([0, 0, 1, 1])\n im = ax.imshow(frames[0])\n def update(frame):\n im.set_data(frame)\n return [im]\n interval = 1000/framerate\n anim = animation.FuncAnimation(fig=fig, func=update, frames=frames,\n interval=interval, blit=True, repeat=False)\n return anim\n",
"_____no_output_____"
],
[
"random_state = np.random.RandomState(42)\n\nenv = suite.load(\"cartpole\", \"balance\")\nspec = env.action_spec()\n\nduration = 4 # Seconds\nframes = []\nticks = []\nrewards = []\nobservations = []\n\nwhile env.physics.data.time < duration:\n action = random_state.uniform(spec.minimum, spec.maximum, spec.shape)\n time_step = env.step(action)\n\n camera0 = env.physics.render(camera_id=0, height=200, width=200)\n camera1 = env.physics.render(camera_id=1, height=200, width=200)\n frames.append(np.hstack((camera0, camera1)))\n rewards.append(time_step.reward)\n observations.append(copy.deepcopy(time_step.observation))\n ticks.append(env.physics.data.time)\n\n #print(env.physics.data.time, time_step)\n\nanim = display_video(frames, framerate=1./env.control_timestep())\n\nnum_sensors = len(time_step.observation)\n\n_, ax = plt.subplots(1 + num_sensors, 1, sharex=True, figsize=(4, 8))\nax[0].plot(ticks, rewards)\nax[0].set_ylabel('reward')\nax[-1].set_xlabel('time')\n\nfor i, key in enumerate(time_step.observation):\n data = np.asarray([observations[j][key] for j in range(len(observations))])\n ax[i+1].plot(ticks, data, label=key)\n ax[i+1].set_ylabel(key)\n\nwritervideo = animation.FFMpegWriter(fps=1./env.control_timestep())\nanim.save('cartpole.mp4', writer=writervideo)\n\nplt.figure()\nplt.imshow(camera1)",
"_____no_output_____"
],
[
"print(time_step)\nprint(np.concatenate(list(time_step.observation.values())))\n",
"TimeStep(step_type=<StepType.MID: 1>, reward=0.20659867770700488, discount=1.0, observation=OrderedDict([('position', array([-1.73035599, 0.29619756, -0.9551267 ])), ('velocity', array([-0.33523315, -3.91798672]))]))\n[-1.73035599 0.29619756 -0.9551267 -0.33523315 -3.91798672]\n"
],
[
"from util.util import DMC2GymWrapper\n\nenv_dmc = suite.load(\"cartpole\", \"balance\")\nenv_gym = DMC2GymWrapper(env_dmc, max_step=100)",
"_____no_output_____"
],
[
"obs = env_gym.reset()\nfor i in range(2000):\n obs, r, done, info = env_gym.step(env_gym.action_space.sample())\n print(env_gym._step, obs, r, done, info)\n if done:\n print(\"Done!\")\n break",
"1 [-0.09737361 0.99998947 0.00458917 -0.09099467 0.09518964] 0.8731861491077438 False {}\n2 [-0.09847117 0.99998307 0.0058193 -0.12851972 0.15086981] 0.9666418837481864 False {}\n3 [-0.10009599 0.99996935 0.00782977 -0.19644601 0.251286 ] 0.897620402958716 False {}\n4 [-0.10198274 0.99994762 0.01023497 -0.18090698 0.22983607] 0.9894654068965445 False {}\n5 [-0.10373544 0.99992239 0.01245855 -0.16963537 0.21496581] 0.9920545672644222 False {}\n6 [-0.10582824 0.99988451 0.01519786 -0.24892648 0.33301622] 0.8605088726909482 False {}\n7 [-0.10820895 0.99983107 0.01838036 -0.22722051 0.30365587] 0.9823788959030058 False {}\n8 [-0.11037813 0.99977357 0.02127944 -0.20661901 0.27635054] 0.9838959077012064 False {}\n9 [-0.11257462 0.99970595 0.02424887 -0.23268251 0.31776449] 0.9775738678093473 False {}\n10 [-0.11467175 0.99963252 0.02710752 -0.18674701 0.25422847] 0.9485577444116031 False {}\n11 [-0.11612281 0.99957768 0.02905961 -0.10346771 0.13639582] 0.8488483857497449 False {}\n12 [-0.11755185 0.99951873 0.03102107 -0.18234071 0.25612163] 0.8632376134829892 False {}\n13 [-0.11950175 0.99942899 0.03378885 -0.20764294 0.29779612] 0.9784210845374891 False {}\n14 [-0.1217335 0.99931463 0.03701707 -0.23871151 0.34833355] 0.9700449027771967 False {}\n15 [-0.12421008 0.9991732 0.04065597 -0.25660786 0.3800897 ] 0.9821107878769311 False {}\n16 [-0.12696102 0.99899805 0.04475368 -0.2935846 0.44030393] 0.9580922966158013 False {}\n17 [-0.12954883 0.99881459 0.04867654 -0.22398173 0.34523127] 0.8871572370727558 False {}\n18 [-0.13156648 0.99865562 0.05183573 -0.17955208 0.28748941] 0.948707176853938 False {}\n19 [-0.13331453 0.9985042 0.05467509 -0.17006103 0.28126287] 0.9885264358424203 False {}\n20 [-0.13486934 0.99835641 0.05731037 -0.14090594 0.24669023] 0.9729459402081582 False {}\n21 [-0.13596472 0.99823682 0.05935699 -0.07817165 0.16338635] 0.9091268678268738 False {}\n22 [-0.13644565 0.99816262 0.06059201 -0.01801618 0.08409543] 0.916532201684044 False {}\n23 [-0.13672295 0.99809988 0.06161674 -0.0374455 0.12126043] 0.9854652142798056 False {}\n24 [-0.13756688 0.99797826 0.06355618 -0.13134133 0.26743574] 0.8078087193345468 False {}\n25 [-0.13905518 0.99778469 0.06652608 -0.16632303 0.32787822] 0.963601071085677 False {}\n26 [-0.14084697 0.99754467 0.07003303 -0.19203833 0.375241 ] 0.9734361232432373 False {}\n27 [-0.14319382 0.9972249 0.07444792 -0.27733399 0.51015578] 0.8321333621864847 False {}\n28 [-0.14613711 0.996808 0.07983619 -0.31133094 0.57085448] 0.9545185817595451 False {}\n29 [-0.14931341 0.99632312 0.08567526 -0.32393513 0.60112698] 0.972578075810292 False {}\n30 [-0.15278944 0.9957526 0.0920693 -0.37127535 0.68291793] 0.9256241678783315 False {}\n31 [-0.1565177 0.99509169 0.09895718 -0.37438418 0.70116454] 0.9685102533078558 False {}\n32 [-0.16042897 0.99433979 0.10624678 -0.4078776 0.76467192] 0.9419772866231183 False {}\n33 [-0.16413174 0.9935516 0.11338088 -0.33268551 0.67103376] 0.8510223552185145 False {}\n34 [-0.1675224 0.99274729 0.1202199 -0.34545337 0.70637038] 0.9632603056383869 False {}\n35 [-0.17059227 0.99193314 0.12676215 -0.26853092 0.61235964] 0.8468927045766681 False {}\n36 [-0.17357639 0.99106825 0.13335564 -0.32829831 0.71778946] 0.8934160516267345 False {}\n37 [-0.17640084 0.9901651 0.1399038 -0.23660297 0.60443682] 0.7940509030051894 False {}\n38 [-0.17847506 0.98934818 0.1455685 -0.17824785 0.54038322] 0.9001907173738711 False {}\n39 [-0.18043653 0.98849195 0.15127348 -0.21405069 0.61353013] 0.9434260846328111 False {}\n40 [-0.18250387 0.98754458 0.15733948 
-0.19942547 0.61453619] 0.9624907824672737 False {}\n41 [-0.18435924 0.98657313 0.16332013 -0.17165511 0.59743344] 0.9507164285555386 False {}\n42 [-0.18629369 0.98550637 0.16963841 -0.21523968 0.68425592] 0.9254458936647025 False {}\n43 [-0.18851386 0.98428351 0.1765955 -0.2287991 0.72866732] 0.9555328428178177 False {}\n44 [-0.190464 0.98303635 0.18341083 -0.16124081 0.6572341 ] 0.8657975163976079 False {}\n45 [-0.19204093 0.98179408 0.1899484 -0.15415091 0.67384637] 0.9589981700624719 False {}\n46 [-0.19341888 0.9805109 0.19646469 -0.12144864 0.65462133] 0.9372296129822779 False {}\n47 [-0.19510369 0.9790376 0.20367958 -0.21551355 0.81827636] 0.7762941398006852 False {}\n48 [-0.19690444 0.97741423 0.21133248 -0.14464847 0.74659485] 0.8478430411635277 False {}\n49 [-0.19850168 0.97572843 0.21898407 -0.17480448 0.82060614] 0.9307866624607399 False {}\n50 [-0.19990152 0.97397527 0.22665431 -0.10517534 0.75323758] 0.8489116329458989 False {}\n51 [-0.20133388 0.97207474 0.23467149 -0.18129742 0.89479743] 0.8287674519201536 False {}\n52 [-0.20301528 0.96994003 0.24334405 -0.15499302 0.89172983] 0.9235922857085533 False {}\n53 [-0.20418863 0.96782157 0.25163744 -0.07969198 0.82046833] 0.8244529354799259 False {}\n54 [-0.20478721 0.96574992 0.25947466 -0.04003341 0.80103982] 0.9090488744154812 False {}\n55 [-0.20528827 0.96355209 0.2675208 -0.06018309 0.86733618] 0.9307327344993668 False {}\n56 [-0.20559522 0.96125667 0.27565489 -0.00122005 0.82326699] 0.865793164507366 False {}\n57 [-0.20587077 0.95879289 0.28410595 -0.05388953 0.93748508] 0.8785877177540372 False {}\n58 [-0.20652338 0.95598011 0.29343148 -0.07663632 1.01083665] 0.9140942058698599 False {}\n59 [-0.20704903 0.95300264 0.302962 -0.02850865 0.98641161] 0.8740438332148837 False {}\n60 [-0.20710472 0.94999908 0.31225269 0.01735911 0.9666966 ] 0.8783824523542756 False {}\n61 [-0.20683873 0.94690468 0.32151443 0.03583121 0.98655621] 0.9121829731877952 False {}\n62 [-0.20648933 0.94360548 0.33107203 0.03404266 1.03589041] 0.9148490327926475 False {}\n63 [-0.20594352 0.94014014 0.34078809 0.07510759 1.02751149] 0.8776109580392077 False {}\n64 [-0.20515727 0.93651928 0.35061609 0.0821347 1.0675051 ] 0.9073983565519819 False {}\n65 [-0.20462687 0.93248761 0.36120196 0.02395134 1.19821934] 0.8350979288435967 False {}\n66 [-0.20446793 0.92795423 0.37269417 0.00783367 1.27286677] 0.882747848872848 False {}\n67 [-0.20466212 0.92288898 0.38506614 -0.04666476 1.40111468] 0.819936929558444 False {}\n68 [-0.20481892 0.91745836 0.39783182 0.01528658 1.37387794] 0.7945725405921772 False {}\n69 [-2.04736874e-01 9.11748691e-01 4.10748493e-01 1.12261766e-03\n 1.45090415e+00] 0.8585727145939683 False {}\n70 [-0.20439155 0.90575563 0.42380035 0.06792254 1.42196181] 0.7728937470775371 False {}\n71 [-0.20396945 0.89935499 0.43721916 0.01650916 1.55172484] 0.8008481913338026 False {}\n72 [-0.20371663 0.89237136 0.45130185 0.03404805 1.59252352] 0.8305410632579486 False {}\n73 [-0.20342909 0.88488679 0.46580615 0.02346367 1.672148 ] 0.8243691070824636 False {}\n74 [-0.20349759 0.87662361 0.48117673 -0.03714163 1.81829794] 0.7456968579087475 False {}\n75 [-0.20426777 0.86730245 0.49778154 -0.11685905 1.99036521] 0.6788044443088758 False {}\n76 [-0.20557538 0.85694674 0.51540497 -0.14464586 2.09819273] 0.7533001983260731 False {}\n77 [-0.206659 0.84598679 0.53320385 -0.07210119 2.08306146] 0.6689026189821333 False {}\n78 [-0.20735308 0.83450213 0.55100472 -0.06670591 2.15427937] 0.7475773458611256 False {}\n79 [-0.20836161 0.8219752 0.56952328 -0.13495444 
2.3175055 ] 0.6537981352318871 False {}\n80 [-0.20930994 0.80859619 0.58836401 -0.05473469 2.3049029 ] 0.6132673345908193 False {}\n81 [-0.20986422 0.79455724 0.60718926 -0.05610136 2.39235853] 0.7045338537749356 False {}\n82 [-0.21048955 0.77948262 0.62642385 -0.06893402 2.49573094] 0.6848322541810745 False {}\n83 [-0.21086892 0.76354105 0.64575929 -0.00694766 2.51707013] 0.6172801426707112 False {}\n84 [-0.21135259 0.74642943 0.66546458 -0.08970427 2.70271615] 0.5541536478915575 False {}\n85 [-0.21214356 0.7279213 0.68566069 -0.06846553 2.77681823] 0.6290226310287588 False {}\n86 [-0.21251676 0.70848488 0.70572599 -0.00617289 2.81123308] 0.5690541660814503 False {}\n87 [-0.21292147 0.68774099 0.72595615 -0.07467586 2.98407232] 0.5371498313665113 False {}\n88 [-0.21368643 0.66536867 0.74651492 -0.07826045 3.09336576] 0.5810123322266102 False {}\n89 [-0.2141057 0.64181501 0.7668595 -0.00558304 3.13249627] 0.5007183365726176 False {}\n90 [-0.21393257 0.61721234 0.78679662 0.04024184 3.20183121] 0.5279850003186414 False {}\n91 [-0.21349843 0.5912718 0.80647235 0.04665395 3.31057771] 0.5353617797999413 False {}\n92 [-0.21312646 0.5637051 0.82597612 0.027833 3.44372889] 0.5105560966856626 False {}\n93 [-0.21259337 0.53460165 0.84510418 0.07883545 3.52272662] 0.4712114435665606 False {}\n94 [-0.21174612 0.50401653 0.86369401 0.09070129 3.63642355] 0.4799668891101611 False {}\n95 [-0.21123377 0.47148062 0.88187642 0.01192833 3.81805934] 0.3866561326527024 False {}\n96 [-0.21071957 0.43714053 0.89939321 0.09096886 3.8934029 ] 0.38300010616821184 False {}\n97 [-0.21021038 0.40097742 0.91608794 0.01104368 4.07284977] 0.35063626754888294 False {}\n98 [-2.10166743e-01 3.62692346e-01 9.31908935e-01 -2.17628680e-03\n 4.21296911e+00] 0.39884302556316314 False {}\n99 [-0.21052445 0.3223216 0.94663023 -0.06918259 4.38151002] 0.33382273388435324 False {}\n100 [-0.21156356 0.27974257 0.96007505 -0.13845127 4.54900569] 0.3126212579664319 True {}\nDone!\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a1804ed50e0f8d17fcd7afed960d868b7e7c132
| 146,639 |
ipynb
|
Jupyter Notebook
|
Pymaceuticals/.ipynb_checkpoints/pymaceuticals_starter-checkpoint.ipynb
|
johnrshows/matplotlib_hw
|
d1d35af519305d30dc4c2fa58e2963eee7e28e88
|
[
"ADSL"
] | null | null | null |
Pymaceuticals/.ipynb_checkpoints/pymaceuticals_starter-checkpoint.ipynb
|
johnrshows/matplotlib_hw
|
d1d35af519305d30dc4c2fa58e2963eee7e28e88
|
[
"ADSL"
] | null | null | null |
Pymaceuticals/.ipynb_checkpoints/pymaceuticals_starter-checkpoint.ipynb
|
johnrshows/matplotlib_hw
|
d1d35af519305d30dc4c2fa58e2963eee7e28e88
|
[
"ADSL"
] | null | null | null | 99.686608 | 16,648 | 0.779854 |
[
[
[
"## Observations and Insights ",
"_____no_output_____"
]
],
[
[
"# Dependencies and Setup\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport scipy.stats as st\nimport numpy as np\nfrom scipy.stats import sem\nfrom scipy.stats import linregress\n\n# Study data files\nmouse_metadata_path = \"data/Mouse_metadata.csv\"\nstudy_results_path = \"data/Study_results.csv\"\n\n# Read the mouse data and the study results\nmouse_metadata = pd.read_csv(mouse_metadata_path)\nstudy_results = pd.read_csv(study_results_path)\n\n# Combine the data into a single dataset\nstudy_data_complete = pd.merge(study_results, mouse_metadata, how=\"left\", on=\"Mouse ID\")\n\n# Display the data table for preview\nstudy_data_complete.head()\n",
"_____no_output_____"
],
[
"# Checking the number of mice.\n\nlen(study_data_complete[\"Mouse ID\"].unique())\n",
"_____no_output_____"
],
[
"# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint. \n\nduplicate_mouse_ids = study_data_complete.loc[study_data_complete.duplicated(subset=['Mouse ID', 'Timepoint']),'Mouse ID'].unique()\nduplicate_mouse_ids",
"_____no_output_____"
],
[
"# Optional: Get all the data for the duplicate mouse ID. \n\nduplicate_mouse_data = study_data_complete.loc[study_data_complete[\"Mouse ID\"] == \"g989\"]\nduplicate_mouse_data",
"_____no_output_____"
],
[
"# Create a clean DataFrame by dropping the duplicate mouse by its ID.\n\nclean_study_data = study_data_complete[study_data_complete['Mouse ID'].isin(duplicate_mouse_ids)==False]\nclean_study_data.head()",
"_____no_output_____"
],
[
"# Checking the number of mice in the clean DataFrame.\n\nlen(clean_study_data[\"Mouse ID\"].unique())",
"_____no_output_____"
]
],
[
[
"## Summary Statistics",
"_____no_output_____"
]
],
[
[
"# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen\n\nmean = np.mean(clean_study_data[\"Tumor Volume (mm3)\"])\nmedian = np.median(clean_study_data[\"Tumor Volume (mm3)\"])\nvariance = np.var(clean_study_data[\"Tumor Volume (mm3)\"], ddof = 0)\nsd = np.std(clean_study_data[\"Tumor Volume (mm3)\"], ddof = 0)\nsample_volume = clean_study_data.sample(75)\nvolume = sem(sample_volume[\"Tumor Volume (mm3)\"])\n\nsummary_statistics = pd.DataFrame({\"Mean\":[mean],\n \"Median\":[median],\n \"Variance\":[variance],\n \"Standard Deviation\":[sd],\n \"SEM\":[volume],\n })\nsummary_statistics.head()\n# This method is the most straightforward, creating multiple series and putting them all together at the end.",
"_____no_output_____"
],
[
"# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen\n\nregimen = clean_study_data.groupby('Drug Regimen')\nregimen_mean = regimen.mean()\nregimen_median = regimen.median()\nregimen_variance = regimen.var()\nregimen_sd = regimen.std()\nregimen_sem = regimen.sem()\n\nsummary_statistics2 = pd.DataFrame({\"Mean\": regimen_mean[\"Tumor Volume (mm3)\"],\n \"Median\": regimen_median[\"Tumor Volume (mm3)\"],\n \"Variance\": regimen_variance[\"Tumor Volume (mm3)\"],\n \"Standard Deviation\": regimen_sd[\"Tumor Volume (mm3)\"],\n \"SEM\": regimen_sem[\"Tumor Volume (mm3)\"]\n })\nsummary_statistics2\n# This method produces everything in a single groupby function.",
"_____no_output_____"
]
],
[
[
"## Bar Plots",
"_____no_output_____"
]
],
[
[
"# Generate a bar plot showing the number of mice per time point for each treatment throughout the course of the study using pandas.\n\nmice_df = clean_study_data.groupby(\"Drug Regimen\")\nvar = mice_df['Mouse ID'].count()\nvar.plot(kind = 'bar',color ='r',title = \"Total Mice per Treatment\", alpha = .75, edgecolor = 'k')\nplt.ylabel('Number of Mice')\n\nplt.show()\n",
"_____no_output_____"
],
[
"# Generate a bar plot showing the number of mice per time point for each treatment throughout the course of the study using pyplot.\n\nplt.bar(var.index,var,color='r',alpha=.75,edgecolor='k')\nplt.xticks(rotation=90)\nplt.ylabel('Number of Mice')\nplt.xlabel('Regimen')\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Pie Plots",
"_____no_output_____"
]
],
[
[
"# Generate a pie plot showing the distribution of female versus male mice using pandas\n\ngender = mouse_metadata.loc[mouse_metadata['Mouse ID'] != 'g989']\ngender_plot = gender['Sex'].value_counts()\ngender_plot.plot(kind='pie', shadow = True, autopct = '%1.2f%%')\nplt.title('Number of Mice by Gender')\n\nplt.show()",
"_____no_output_____"
],
[
"# Generate a pie plot showing the distribution of female versus male mice using pyplot\n\nlabels = gender_plot.index\nsizes = gender_plot\nchart = plt.pie(sizes,autopct='%1.2f%%',labels=labels, shadow=True)\nplt.ylabel('Sex')\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Quartiles, Outliers and Boxplots",
"_____no_output_____"
]
],
[
[
"# Calculate the final tumor volume of each mouse across four of the most promising treatment regimens.\n\ntreatment = [\"Capomulin\", \"Ramicane\", \"Infubinol\", \"Ceftamin\"]\n\n#start by getting the last (greatest) timepoint for each mouse\ntimepoint_df = clean_study_data[['Mouse ID', 'Timepoint', 'Drug Regimen']]\n\nfiltered_df=timepoint_df[timepoint_df['Drug Regimen'].isin(treatment)]\n\ngrouped_df = filtered_df.groupby('Mouse ID')['Timepoint'].max()\n\n# merge this group df with the original dataframe to get the tumor volume at the last timepoint\n\nmerged_df = pd.merge(grouped_df,clean_study_data,on=['Mouse ID','Timepoint'],how = 'left')\nmerged_df.head()",
"_____no_output_____"
],
[
"#Put treatments into a list for a for loop (and later for plot labels)\n\n# Create empty list to fill with tumor vol data (for plotting)\n\n#tumor_vol_list = []\nfor drug in treatment:\n quartiles = merged_df[drug].quantile([.25,.5,.75]).round(2)\n lowerq = quartiles[.25].round(2)\n upperq = quartiles[.75].round(2)\n iqr = round(upperq-lowerq,2)\n lower_bound = round(lowerq - (1.5*iqr),2)\n upper_bound = round(upperq+(1.5*iqr),2)\n# Calculate the IQR and quantitatively determine if there are any potential outliers. \n\n# Locate the rows which contain mice on each drug and get the tumor volume\n\n# add subset \n\n# Determine outliers using upper and lower bounds\n",
"_____no_output_____"
],
[
"# Generate a box plot of the final tumor volume of each mouse across four regimens of interest",
"_____no_output_____"
]
],
[
[
"## Line and Scatter Plots",
"_____no_output_____"
]
],
[
[
"#capomulin df\n\ncapomulin_df = clean_study_data.loc[clean_study_data['Drug Regimen']=='Capomulin']\n\nprint(len(capomulin_df['Mouse ID'].unique()))\n\ncapomulin_df.head()",
"25\n"
],
[
"# Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin\n\ncapomulin_mouse = clean_study_data.loc[clean_study_data['Mouse ID']=='u364']\nx_axis=capomulin_mouse['Timepoint']\ny_axis=capomulin_mouse['Tumor Volume (mm3)']\n\nplt.ylabel('Tumor Volume')\nplt.xlabel('Timepoint')\nplt.title('Timepoint vs. Tumor Volume')\nplt.plot(x_axis,y_axis)",
"_____no_output_____"
],
[
"# Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen\n\ncapomulin_mouse = clean_study_data.loc[clean_study_data['Drug Regimen']=='Capomulin']\ncapomulin_df = capomulin_mouse.groupby('Weight (g)')\n\nmean_tumor= capomulin_df['Tumor Volume (mm3)'].mean()\n\nweight_tumor=pd.DataFrame(mean_tumor).reset_index()\n\nweight_tumor.plot(kind='scatter',x='Weight (g)',y = 'Tumor Volume (mm3)')\nplt.title('Weight (g) vs. Tumor Volume (mm3)')\n\nplt.show()\n",
"_____no_output_____"
]
],
[
[
"## Correlation and Regression",
"_____no_output_____"
]
],
[
[
"# Calculate the correlation coefficient and linear regression model \n# for mouse weight and average tumor volume for the Capomulin regimen\n\nvar1 = weight_tumor['Weight (g)']\nvar2 = weight_tumor['Tumor Volume (mm3)']\ncorr = st.pearsonr(var1,var2)\nprint(f\"The correlation coefficient of weight and average tumor volume is {corr[0]}\")",
"The correlation coefficient of weight and average tumor volume is 0.9505243961855265\n"
],
[
"(slope, intercept, rvalue, pvalue, stderr) = linregress(var1,var2)\nregress_vals = var1*slope+intercept \nline_eq = \"y = \" + str(round(slope,2)) + \"x + \" + str(round(intercept,2))\nplt.scatter(var1,var2)\nplt.plot(var1, regress_vals,'r-')\nplt.annotate(line_eq,(20,37), fontsize= 15,color ='r')",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a180902950e00ad5d4410dbaa3667b8c72e1a1e
| 31,722 |
ipynb
|
Jupyter Notebook
|
d007_ManySig_ndays.ipynb
|
WiSig-dataset/wisig-examples
|
afbf0a95bc888a7e3d9f94fa3442d36dace6e1fb
|
[
"BSD-3-Clause"
] | null | null | null |
d007_ManySig_ndays.ipynb
|
WiSig-dataset/wisig-examples
|
afbf0a95bc888a7e3d9f94fa3442d36dace6e1fb
|
[
"BSD-3-Clause"
] | null | null | null |
d007_ManySig_ndays.ipynb
|
WiSig-dataset/wisig-examples
|
afbf0a95bc888a7e3d9f94fa3442d36dace6e1fb
|
[
"BSD-3-Clause"
] | null | null | null | 79.904282 | 14,788 | 0.764643 |
[
[
[
"%load_ext autoreload\n\n%autoreload 2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\nimport os\nimport os.path\n\nimport scipy,scipy.spatial\nimport matplotlib\nmatplotlib.rcParams['figure.dpi'] = 100\n\nfrom data_utilities import *\n# from definitions import *\n# from run_train_eval_net import run_train_eval_net,run_eval_net",
"_____no_output_____"
],
[
"import os\nGPU = \"1\"\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" \nos.environ[\"CUDA_VISIBLE_DEVICES\"]=GPU",
"_____no_output_____"
],
[
"dataset_name = 'ManySig'\ndataset_path='../../orbit_rf_dataset/data/compact_pkl_datasets/'\n\ncompact_dataset = load_compact_pkl_dataset(dataset_path,dataset_name)\n\ntx_list = compact_dataset['tx_list']\nrx_list = [compact_dataset['rx_list'][0]]\n\nequalized = 0\n\ncapture_date_list = compact_dataset['capture_date_list']\nn_tx = len(tx_list)\nn_rx = len(rx_list)\nprint(n_tx,n_rx)",
"6 1\n"
],
[
"import tensorflow as tf\nimport tensorflow.keras as keras\nfrom tensorflow.keras import regularizers\nfrom tensorflow.keras.models import Sequential, Model\nfrom tensorflow.keras.layers import *\nimport tensorflow.keras.backend as K\n",
"/home/samer/miniconda3/envs/mod_framework/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n/home/samer/miniconda3/envs/mod_framework/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n/home/samer/miniconda3/envs/mod_framework/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n/home/samer/miniconda3/envs/mod_framework/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n/home/samer/miniconda3/envs/mod_framework/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n/home/samer/miniconda3/envs/mod_framework/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n/home/samer/miniconda3/envs/mod_framework/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n/home/samer/miniconda3/envs/mod_framework/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n/home/samer/miniconda3/envs/mod_framework/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n/home/samer/miniconda3/envs/mod_framework/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n/home/samer/miniconda3/envs/mod_framework/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it 
will be understood as (type, (1,)) / '(1,)type'.\n _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n/home/samer/miniconda3/envs/mod_framework/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n"
],
[
" def create_net():\n\n inputs = Input(shape=(256,2))\n x = Reshape((256,2,1))(inputs)\n x = Conv2D(8,(3,2),activation='relu',padding = 'same')(x)\n x = MaxPool2D((2,1))(x)\n x = Conv2D(16,(3,2),activation='relu',padding = 'same')(x)\n x = MaxPool2D((2,1))(x)\n x = Conv2D(16,(3,2),activation='relu',padding = 'same')(x)\n x = MaxPool2D((2,2))(x)\n x = Conv2D(32,(3,1),activation='relu',padding = 'same')(x)\n x = MaxPool2D((2,1))(x)\n x = Conv2D(16,(3,1),activation='relu',padding = 'same')(x)\n #x = resnet(x,64,(3,2),'6')\n #x = MaxPool2D((2,2))(x)\n x = Flatten()(x)\n\n\n\n x = Dense(100, activation='relu', kernel_regularizer = keras.regularizers.l2(0.0001))(x)\n # x = Dropout(0.3)(x)\n x = Dense(80, activation='relu',kernel_regularizer = keras.regularizers.l2(0.0001))(x)\n x = Dropout(0.5)(x)\n x = Dense(n_tx, activation='softmax',kernel_regularizer = keras.regularizers.l2(0.0001))(x)\n ops = x\n\n classifier = Model(inputs,ops)\n classifier.compile(loss='categorical_crossentropy',metrics=['categorical_accuracy'],optimizer=keras.optimizers.Adam(0.0005))\n \n return classifier\n\nclassifier = create_net()\nclassifier.summary()",
"WARNING:tensorflow:From /home/samer/miniconda3/envs/mod_framework/lib/python3.6/site-packages/tensorflow/python/ops/init_ops.py:1251: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\nInstructions for updating:\nCall initializer instance with the dtype argument instead of passing it to the constructor\nModel: \"model\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_1 (InputLayer) [(None, 256, 2)] 0 \n_________________________________________________________________\nreshape (Reshape) (None, 256, 2, 1) 0 \n_________________________________________________________________\nconv2d (Conv2D) (None, 256, 2, 8) 56 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 128, 2, 8) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 128, 2, 16) 784 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 64, 2, 16) 0 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 64, 2, 16) 1552 \n_________________________________________________________________\nmax_pooling2d_2 (MaxPooling2 (None, 32, 1, 16) 0 \n_________________________________________________________________\nconv2d_3 (Conv2D) (None, 32, 1, 32) 1568 \n_________________________________________________________________\nmax_pooling2d_3 (MaxPooling2 (None, 16, 1, 32) 0 \n_________________________________________________________________\nconv2d_4 (Conv2D) (None, 16, 1, 16) 1552 \n_________________________________________________________________\nflatten (Flatten) (None, 256) 0 \n_________________________________________________________________\ndense (Dense) (None, 100) 25700 \n_________________________________________________________________\ndense_1 (Dense) (None, 80) 8080 \n_________________________________________________________________\ndropout (Dropout) (None, 80) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 6) 486 \n=================================================================\nTotal params: 39,778\nTrainable params: 39,778\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"def evaluate_test(classifier):\n pred = classifier.predict(sig_dfTest)\n acc = np.mean(np.argmax(pred,1)==txidNum_dfTest)\n\n test_indx = ()\n for indx in range(len(tx_list)):\n cls_indx = np.where(txidNum_dfTest == indx)\n test_indx = test_indx + (cls_indx[0][:n_test_samples],)\n test_indx = np.concatenate(test_indx) \n acc_bal = np.mean(np.argmax(pred[test_indx,:],1)==txidNum_dfTest[test_indx])\n return acc,acc_bal",
"_____no_output_____"
],
[
"TRAIN = True\ncontinue_training = True\nnreal = 5\n\nreal_list = list(range(nreal))\n\n\n\npatience = 5\nn_epochs = 100\n\n\n\ncapture_date_test_list = capture_date_list[-1]\n\ndataset_test = merge_compact_dataset(compact_dataset,capture_date_test_list,tx_list,rx_list, equalized=equalized)\n \ntest_augset_dfDay,_,_ = prepare_dataset(dataset_test,tx_list,\n val_frac=0, test_frac=0)\n[sig_dfTest,txidNum_dfTest,txid_dfTest,cls_weights] = test_augset_dfDay\n\nsmTest_results_real = []\ndfTest_results_real = []\n\n\nfor nday in range(3):\n print(\"\");print(\"\")\n print(\"nday: {} \".format(nday))\n fname_w = 'weights/d007_{:04d}.hd5'.format(nday)\n rx_train_list= rx_list\n\n dataset = merge_compact_dataset(compact_dataset,capture_date_list[:nday+1],tx_list,rx_list, equalized=equalized)\n \n\n\n train_augset,val_augset,test_augset_smRx = prepare_dataset(dataset,tx_list,\n val_frac=0.1, test_frac=0.1)\n [sig_train,txidNum_train,txid_train,cls_weights] = train_augset\n [sig_valid,txidNum_valid,txid_valid,_] = val_augset\n [sig_smTest,txidNum_smTest,txid_smTest,cls_weights] = test_augset_smRx\n\n if continue_training:\n skip = os.path.isfile(fname_w)\n else:\n skip = False\n classifier = create_net()\n if TRAIN and not skip:\n filepath = 't_weights_'+GPU\n c=[ keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True),\n keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience)]\n history = classifier.fit(sig_train,txid_train,class_weight=cls_weights,\n validation_data=(sig_valid , txid_valid),callbacks=c, epochs=n_epochs)\n classifier.load_weights(filepath)\n classifier.save_weights(fname_w,save_format=\"h5\")\n else:\n classifier.load_weights(fname_w)\n\n smTest_r = classifier.evaluate(sig_smTest,txid_smTest,verbose=0)[1]\n dfTest_r = classifier.evaluate(sig_dfTest,txid_dfTest,verbose=0)[1]\n\n\n print(smTest_r,dfTest_r)\n smTest_results_real.append(smTest_r)\n dfTest_results_real.append(dfTest_r)\n K.clear_session()\n \n \n \n",
"/home/samer/Documents/txid_framework/dataset_scripts/working/data_utilities.py:119: RuntimeWarning: invalid value encountered in true_divide\n cls_weights = np.max(stat,axis=0)/stat\n"
],
[
"plt.plot(range(1,4),smTest_results_real)\nplt.plot(range(1,4),dfTest_results_real)\nplt.xlabel('No of Training Days')\nplt.ylabel('Accuracy')\nplt.legend(['Same Day','Diff Day'])\nprint(range(1,4))\nprint(smTest_results_real)\nprint(dfTest_results_real)\n",
"range(1, 4)\n[0.99833333, 0.9975, 0.9988889]\n[0.4935, 0.7406667, 0.82916665]\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a180c9e868427525bf5c045282f880b80b9933f
| 319,150 |
ipynb
|
Jupyter Notebook
|
notebooks/ROI/03_ClimateChange/S1_S3_SLR/01_14_Validation.ipynb
|
teslakit/teslak
|
3f3dda08c5c5998cb2a7debbf22f2be675a4ff8b
|
[
"MIT"
] | 12 |
2019-11-14T22:19:12.000Z
|
2022-03-04T01:25:33.000Z
|
notebooks/ROI/03_ClimateChange/S1_S3_SLR/01_14_Validation.ipynb
|
anderdyl/teslaCoSMoS
|
1495bfa2364ddbacb802d145b456a35213abfb7c
|
[
"MIT"
] | 5 |
2020-03-24T18:21:41.000Z
|
2021-08-23T20:39:43.000Z
|
notebooks/ROI/03_ClimateChange/S1_S3_SLR/01_14_Validation.ipynb
|
anderdyl/teslaCoSMoS
|
1495bfa2364ddbacb802d145b456a35213abfb7c
|
[
"MIT"
] | 2 |
2021-03-06T07:54:41.000Z
|
2021-06-30T14:33:22.000Z
| 914.469914 | 100,140 | 0.955027 |
[
[
[
"\n... ***CURRENTLY UNDER DEVELOPMENT*** ...\n",
"_____no_output_____"
],
[
"## Validation of the total water level\n\ninputs required: \n * historical wave conditions\n * emulator output - synthetic wave conditions of TWL \n * emulator output - synthetic wave conditions of TWL with 3 scenarios of SLR\n\n\nin this notebook:\n * Comparison of the extreme distributions\n \n",
"_____no_output_____"
]
],
[
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# common\nimport os\nimport os.path as op\n\n# pip\nimport numpy as np\nimport xarray as xr\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\n\n# DEV: override installed teslakit\nimport sys\nsys.path.insert(0, op.join(os.path.abspath(''), '..', '..', '..', '..'))\n\n# teslakit\nfrom teslakit.database import Database\nfrom teslakit.climate_emulator import Climate_Emulator\nfrom teslakit.extremes import Peaks_Over_Threshold as POT\nfrom teslakit.util.time_operations import xds_reindex_daily\n\nfrom teslakit.plotting.extremes import Plot_ReturnPeriodValidation_CC\nfrom teslakit.plotting.estela import Plot_DWTs_Probs\nfrom teslakit.plotting.wts import Plot_Probs_WT_WT\nfrom teslakit.plotting.outputs import Plot_LevelVariables_Histograms\n",
"_____no_output_____"
]
],
[
[
"\n## Database and Site parameters",
"_____no_output_____"
]
],
[
[
"# --------------------------------------\n# Teslakit database\n\np_data = r'/Users/albacid/Projects/TeslaKit_projects'\n\n# offshore\ndb = Database(p_data)\ndb.SetSite('ROI')\n\n# climate change - S1\ndb_S1 = Database(p_data)\ndb_S1.SetSite('ROI_CC_S1')\n\n# climate change - S2\ndb_S2 = Database(p_data)\ndb_S2.SetSite('ROI_CC_S2')\n\n# climate change - S3\ndb_S3 = Database(p_data)\ndb_S3.SetSite('ROI_CC_S3')\n\n",
"_____no_output_____"
],
[
"# --------------------------------------\n# Load complete hourly data for extremes analysis\n\n# Historical\nHIST_C_h = db.Load_HIST_OFFSHORE(vns=['TWL'],decode_times=True)\n\n# Simulation (1000 yrs)\nSIM_C_h = db.Load_SIM_OFFSHORE_all(vns=['TWL'], decode_times=True, use_cftime=True)\n\n# Simulation climate change S1 (100 yrs)\nSIM_C_h_CChange_S1 = db_S1.Load_SIM_OFFSHORE_all(decode_times=True, use_cftime=True)\n\n# Simulation climate change S2 (100 yrs)\nSIM_C_h_CChange_S2 = db_S2.Load_SIM_OFFSHORE_all(decode_times=True, use_cftime=True)\n\n# Simulation climate change S3 (100 yrs)\nSIM_C_h_CChange_S3 = db_S3.Load_SIM_OFFSHORE_all(decode_times=True, use_cftime=True)\n",
"_____no_output_____"
],
[
"# Keep first 100 years of simulation without climate change\nSIM_C_h = SIM_C_h.isel(time=slice(0, len(SIM_C_h_CChange_S1.time))) # 100 years\n",
"_____no_output_____"
]
],
[
[
"\n## Level Variables (TWL) - Histograms",
"_____no_output_____"
]
],
[
[
"from teslakit.plotting.outputs import axplot_compare_histograms\nfrom teslakit.plotting.config import _faspect, _fsize\nimport matplotlib.gridspec as gridspec\n\n# Plot TWL histogram comparison between historical and simulated data for different SLR scenarios\n\ndata_fit = HIST_C_h['TWL'].values[:]; data_fit = data_fit[~np.isnan(data_fit)]\ndata_sim = SIM_C_h['TWL'].sel(n_sim = 0).values[:]; data_sim = data_sim[~np.isnan(data_sim)]\ndata_sim_1 = SIM_C_h_CChange_S1['TWL'].sel(n_sim = 0).values[:]; data_sim_1 = data_sim_1[~np.isnan(data_sim_1)]\ndata_sim_2 = SIM_C_h_CChange_S2['TWL'].sel(n_sim = 0).values[:]; data_sim_2 = data_sim_2[~np.isnan(data_sim_2)]\ndata_sim_3 = SIM_C_h_CChange_S3['TWL'].sel(n_sim = 0).values[:]; data_sim_3 = data_sim_3[~np.isnan(data_sim_3)]\n\n\n# plot figure\nfig = plt.figure(figsize=(_faspect*_fsize, _fsize*2/2.3))\ngs = gridspec.GridSpec(2, 2) \nn_bins = np.linspace(np.nanmin([np.nanmin(data_fit), np.nanmin(data_sim_3)]),np.nanmax([np.nanmax(data_fit), np.nanmax(data_sim_3)]), 40)\n\nax = plt.subplot(gs[0, 0])\naxplot_compare_histograms(ax, data_fit, data_sim, ttl='TWL', n_bins=n_bins,\n color_1='white', color_2='skyblue', alpha_1=0.9, alpha_2=0.7,\n label_1='Historical', label_2='Simulation')\n\nax = plt.subplot(gs[0, 1])\naxplot_compare_histograms(ax, data_sim, data_sim_1, ttl='TWL', n_bins=n_bins,\n color_1='white', color_2='skyblue', alpha_1=0.9, alpha_2=0.7,\n label_1='Simulation', label_2='Simulation Climate Change S1')\n\nax = plt.subplot(gs[1, 0])\naxplot_compare_histograms(ax, data_sim, data_sim_2, ttl='TWL', n_bins=n_bins,\n color_1='white', color_2='skyblue', alpha_1=0.9, alpha_2=0.7,\n label_1='Simulation', label_2='Simulation Climate Change S2')\n\nax = plt.subplot(gs[1, 1])\naxplot_compare_histograms(ax, data_sim, data_sim_3, ttl='TWL', n_bins=n_bins,\n color_1='white', color_2='skyblue', alpha_1=0.9, alpha_2=0.7,\n label_1='Simulation', label_2='Simulation Climate Change S3')\n\n",
"_____no_output_____"
]
],
[
[
"\n## TWL - Annual Maxima for different SLR scenarios",
"_____no_output_____"
]
],
[
[
"# Plot TWL annual maxima\n\n# calculate Annual Maxima values for historical and simulated data\nhist_A = HIST_C_h['TWL'].groupby('time.year').max(dim='time')\nsim_A = SIM_C_h['TWL'].groupby('time.year').max(dim='time')",
"_____no_output_____"
]
],
[
[
"### SLR S1 (intermediate low, +0.5m)",
"_____no_output_____"
]
],
[
[
"sim_B = SIM_C_h_CChange_S1['TWL'].groupby('time.year').max(dim='time')\n\n# Return Period historical vs. simulations\nPlot_ReturnPeriodValidation_CC(hist_A, sim_A.transpose(), sim_B.transpose());\n",
"_____no_output_____"
]
],
[
[
"### SLR S2 (intermediate, +1m)",
"_____no_output_____"
]
],
[
[
"sim_B = SIM_C_h_CChange_S2['TWL'].groupby('time.year').max(dim='time')\n\n# Return Period historical vs. simulations\nPlot_ReturnPeriodValidation_CC(hist_A, sim_A.transpose(), sim_B.transpose());",
"_____no_output_____"
]
],
[
[
"### SLR S3 (intermediate high, +1.5m)\n",
"_____no_output_____"
]
],
[
[
"sim_B = SIM_C_h_CChange_S3['TWL'].groupby('time.year').max(dim='time')\n\n# Return Period historical vs. simulations\nPlot_ReturnPeriodValidation_CC(hist_A, sim_A.transpose(), sim_B.transpose());",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a1815b90896a2cfa4dd8794ce746ff057459f4c
| 60,347 |
ipynb
|
Jupyter Notebook
|
002_Python_String.ipynb
|
honeypeachh/02_Python_Datatypes
|
57828db90aa8d960c62612ed33bb985483442e8a
|
[
"MIT"
] | 4 |
2021-09-23T05:44:55.000Z
|
2021-12-24T02:41:31.000Z
|
002_Python_String.ipynb
|
Wangcx225/02_Python_Datatypes
|
57828db90aa8d960c62612ed33bb985483442e8a
|
[
"MIT"
] | null | null | null |
002_Python_String.ipynb
|
Wangcx225/02_Python_Datatypes
|
57828db90aa8d960c62612ed33bb985483442e8a
|
[
"MIT"
] | null | null | null | 29.423208 | 655 | 0.540607 |
[
[
[
"<small><small><i>\nAll the IPython Notebooks in this lecture series by Dr. Milan Parmar are available @ **[GitHub](https://github.com/milaan9/02_Python_Datatypes)**\n</i></small></small>",
"_____no_output_____"
],
[
"# Python Strings\n\nIn this class you will learn to create, format, modify and delete strings in Python. Also, you will be introduced to various string operations and functions.",
"_____no_output_____"
],
[
"## What is String in Python?\n\nA string is a built-in type sequence of characters. It is used to handle **textual data** in python. Python **Strings are immutable sequences** of **Unicode** points. Creating Strings are simplest and easy to use in Python.\n\nA character is simply a symbol. For example, the English language has 26 characters.\n\nComputers do not deal with characters, they deal with numbers (binary). Even though you may see characters on your screen, internally it is stored and manipulated as a combination of 0s and 1s.\n\nThis conversion of character to a number is called encoding, and the reverse process is decoding. ASCII and Unicode are some of the popular encodings used.\n\nIn Python, a string is a sequence of Unicode characters. Unicode was introduced to include every character in all languages and bring uniformity in encoding. These Unicodes range from **$0_{hex}$** to **$10FFFF_{hex}$**. Normally, a Unicode is referred to by writing **\"U+\"** followed by its **hexadecimal** number. Thus strings in Python are a sequence of Unicode values. You can learn about Unicode from **[Python Unicode](https://docs.python.org/3.3/howto/unicode.html)**.\n\n<div>\n<img src=\"img/s0.png\" width=\"600\"/>\n</div>",
"_____no_output_____"
],
[
"## How to create a string in Python?\n\nStrings can be created by enclosing characters inside a **single quote** or **double-quotes**. Even **triple quotes** can be used in Python but generally used to represent multiline strings and docstrings.",
"_____no_output_____"
]
],
[
[
"# Example:\n\n# defining strings in Python\n# all of the following are equivalent\nmy_string = 'Hello'\nprint(my_string)\n\nmy_string = \"Hello\"\nprint(my_string)\n\nmy_string = '''Hello'''\nprint(my_string)\n\n# triple quotes string can extend multiple lines\nmy_string = \"\"\"Hello, welcome to\n the world of Python\"\"\"\nprint(my_string)",
"Hello\nHello\nHello\nHello, welcome to\n the world of Python\n"
],
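[
"# Hedged add-on example (not from the original notebook): a small sketch of the\n# encoding ideas above. ord() returns a character's Unicode code point, chr() is\n# its inverse, and encode()/decode() convert between str and bytes.\nprint(ord('A'))                    # 65\nprint(chr(97))                     # a\nprint('hello'.encode('utf-8'))     # str -> bytes: b'hello'\nprint(b'hello'.decode('utf-8'))    # bytes -> str: hello",
"_____no_output_____"
],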
[
"a = \"Hello,\"\nb= 'World!'\nprint(a+b)\nprint(a+\" \"+b)",
"Hello,World!\nHello, World!\n"
],
[
"string1='World'\nstring2='!'\nprint('Hello,' + \" \" + string1 + string2)",
"Hello, World!\n"
]
],
[
[
"## How to access characters in a string?\n\n* In Python, Strings are stored as individual characters in a **contiguous memory location**. \n\n* The benefit of using String is that it can be accessed from both the **directions** (forward and backward).\n\n* Both forward as well as backward indexing are provided using Strings in Python.\n\n* Forward indexing starts with **`0,1,2,3,.... `**\n\n* Backward indexing starts with **`-1,-2,-3,-4,.... `**\n\n* Trying to access a character out of index range will raise an **`IndexError`**. The index must be an integer. We can't use floats or other types, this will result into **`IndexError`**.\n\n* Strings can be indexed with square brackets. Indexing starts from zero in Python.\n\n* We can access a range of items in a string by using the slicing operator **`:`**(colon).\n\n* And the **`len()`** function provides the length of a string\n\n```python\nstr[0] = 'P' = str[-6] ,\nstr[1] = 'Y' = str[-5] ,\nstr[2] = 'T' = str[-4] ,\nstr[3] = 'H' = str[-3] ,\nstr[4] = 'O' = str[-2] , # refers to the second last item\nstr[5] = 'N' = str[-1]. # refers to the last item\n```\n\n<div>\n<img src=\"img/s3.png\" width=\"300\"/>\n</div>",
"_____no_output_____"
]
],
[
[
"# Accessing string characters in Python\nstr = 'PYTHON'\nprint('str = ', str)\n\n#first character\nprint('str[0] = ', str[0])\n\n#last character\nprint('str[-1] = ', str[-1])\n\n#slicing 2nd to 5th character\nprint('str[1:5] = ', str[1:5])\n\n#slicing 6th to 2nd last character\nprint('str[5:-2] = ', str[3:-1])",
"str = PYTHON\nstr[0] = P\nstr[-1] = N\nstr[1:5] = YTHO\nstr[5:-2] = HO\n"
]
],
[
[
"If we try to access an index out of the range or use numbers other than an integer, we will get errors.",
"_____no_output_____"
]
],
[
[
"# Accessing string characters in Python\nstr = 'PYTHON'\nprint('str = ', str)\n\n# index must be in range\nprint('str[15] = ', str[15])",
"str = PYTHON\n"
],
[
"# Accessing string characters in Python\nstr = 'PYTHON'\nprint('str = ', str)\n\n# index must be an integer\nprint('str[1.50] = ', str[1.5])",
"str = PYTHON\n"
],
[
"s = '123456789' #Indexing strats from 0 to 8\nprint(\"The string '%s' string is %d characters long\" %(s, len(s)) )\nprint('First character of',s,'is',s[0])\nprint('Last character of',s,'is',s[8])\nprint('Last character of',s,'is',s[len(s)-1]) # [9-1] = [8] is 9",
"The string '123456789' string is 9 characters long\nFirst character of 123456789 is 1\nLast character of 123456789 is 9\nLast character of 123456789 is 9\n"
]
],
[
[
"Negative indices can be used to start counting from the back",
"_____no_output_____"
]
],
[
[
"print('First character of',s,'is',s[-len(s)])\nprint('First character of',s,'is',s[(-9)])\nprint('Second character of',s,'is',s[(-8)])\nprint('Last character of',s,'is',s[-1])",
"First character of 123456789 is 1\nFirst character of 123456789 is 1\nSecond character of 123456789 is 2\nLast character of 123456789 is 9\n"
]
],
[
[
"Finally a substring (range of characters) an be specified as using $a:b$ to specify the characters at index $a,a+1,\\ldots,b-1$. Note that the last charcter is *not* included.",
"_____no_output_____"
]
],
[
[
"print(\"First three characters\",s[0:3])\nprint(\"Next three characters\",s[3:6])",
"First three characters 123\nNext three characters 456\n"
]
],
[
[
"An empty beginning and end of the range denotes the beginning/end of the string:",
"_____no_output_____"
]
],
[
[
"s = '123456789' #Indexing strats from 0 to 8\nprint(\"First three characters\", s[:3])\nprint(\"Last three characters\", s[-3:])",
"First three characters 123\nLast three characters 789\n"
],
[
"# Here, we are creating a simple program to retrieve String in reverse as well as normal form.\n\nname=\"Milan\"\nlength=len(name)\ni=0\n\nfor n in range(-1,(-length-1),-1):\n print(name[i],\"\\t\",name[n])\n i+=1",
"M \t n\ni \t a\nl \t l\na \t i\nn \t M\n"
]
],
[
[
"## How to slice a string in Python?\n\nPython String **slice** can be defined as a **substring** which is the part of the string. Therefore further substring can be obtained from a string.\n\nThere can be many forms to slice a string, as string can be accessed or indexed from both the direction and hence string can also be sliced from both the directions.\n\nSlicing can be best visualized by considering the index to be between the elements as shown below.\n\nIf we want to access a range, we need the index that will slice the portion from the string.\n\n<div>\n<img src=\"img/s16.png\" width=\"300\"/>\n</div>\n\n**Syntax** of Slice Operator :\n\n```python\nstr[start : stop : step ]\n```\n\nother syntax of slice:\n\n```python\nstr[start : stop] # items start through stop-1\n\nstr[start : ] # items start through the rest of the array\n\nstr[ : stop] # items from the beginning through stop-1\n\nstr[ : ] # a copy of the whole array\n```",
"_____no_output_____"
]
],
[
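[
"# Hedged add-on example (not from the original notebook): unlike single-index\n# access, out-of-range slice bounds are clipped rather than raising IndexError.\ns = 'PYTHON'\nprint(s[2:100])   # THON - the stop bound is clipped to len(s)\nprint(s[100:200]) # empty string, no error",
"_____no_output_____"
],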
[
"# Example: \n\ns=\"Milan Python\"\n\nprint(s[6:10])\nprint(s[-12:-7])\nprint(s[-1: :-1]) #reversed all string\nprint(s[2: 10: 2]) #step = 2\nprint(s[ : : -1]) #reversed all string\nprint(s[ : 5]) #from 0 to 4\nprint(s[3 : ]) #from 3 to end of the string\nprint(s[ : ]) #copy all string",
"Pyth\nMilan\nnohtyP naliM\nlnPt\nnohtyP naliM\nMilan\nan Python\nMilan Python\n"
]
],
[
[
"**NOTE**: Both the operands passed for concatenation must be of same type, else it will show an error.",
"_____no_output_____"
],
[
"## Breaking appart strings\n\nWhen processing text, the ability to split strings appart is particularly useful. \n\n* `partition(separator)`: breaks a string into three parts based on a separator\n\n* `split()`: breaks string into words separated by white-space (optionally takes a separator as argument)\n\n* `join()`: joins the result of a split using string as separator",
"_____no_output_____"
]
],
[
[
"s = \"one -> two -> three\"\nprint( s.partition(\"->\") )\nprint( s.split() )\nprint( s.split(\" -> \") )\nprint( \";\".join( s.split(\" -> \") ) )",
"('one ', '->', ' two -> three')\n['one', '->', 'two', '->', 'three']\n['one', 'two', 'three']\none;two;three\n"
],
[
"\"This will split all words into a list\".split()",
"_____no_output_____"
],
[
"' '.join(['This', 'will', 'join', 'all', 'words', 'into', 'a', 'string'])",
"_____no_output_____"
],
[
"'Happy New Year'.find('ew')",
"_____no_output_____"
],
[
"'Happy New Year'.replace('Happy','Brilliant')",
"_____no_output_____"
]
],
[
[
"## How to change or delete a string?\n\nStrings are immutable. This means that elements of a string cannot be changed once they have been assigned. We can simply reassign different strings to the same name.",
"_____no_output_____"
]
],
[
[
"my_string = 'python'\nmy_string[5] = 'a'",
"_____no_output_____"
],
[
"s='012345'\nsX=s[:2]+'X'+s[3:] # this creates a new string with 2 replaced by X\nprint(\"creating new string\",sX,\"OK\")\n\nsX=s.replace('2','X') # the same thing\nprint(sX,\"still OK\")\n\ns[2] = 'X' # an error!!!",
"creating new string 01X345 OK\n01X345 still OK\n"
]
],
[
[
"We cannot delete or remove characters from a string. But deleting the string entirely is possible using the **`del`** keyword.",
"_____no_output_____"
]
],
[
[
"my_string = 'python'\ndel my_string[1] # deleting element of string generates error!",
"_____no_output_____"
],
[
"my_string = 'python'\ndel my_string # deleting whole string using 'del' keyword can delete it.\nmy_string",
"_____no_output_____"
]
],
[
[
"## Python Strings Operations\n\nThere are many operations that can be performed with strings which makes it one of the most used data types in Python.\n\nTo learn more about the data types available in Python visit: **[Python Data Types](https://github.com/milaan9/01_Python_Introduction/blob/main/009_Python_Data_Types.ipynb)**.\n\nTo perform operation on string, Python provides basically 3 types of Operators that are given below.\n\n* Basic Operators/Concatenation of Two or More Strings.\n* Membership Operators.\n* Relational Operators.",
"_____no_output_____"
],
[
"### 1. Basic Operators for concatenation of two or more strings\n\nThere are two types of basic operators in String **`+`** and **`*`**.\n\nThe **`+`** (concatenation) operator can be used to concatenates two or more string literals together.\n\nThe **`*`** (Replication) operator can be used to repeat the string for a given number of times.\n\n#### String Concatenation Operator (**`+`**)\nJoining of two or more strings into a single one is called concatenation.",
"_____no_output_____"
]
],
[
[
"# Example:\n\nstr1=\"Hello\"\nstr2=\"World!\"\nprint(str1+str2)",
"HelloWorld!\n"
]
],
[
[
"| Expression | Output |\n|:----| :--- |\n| **`\"10\" + \"50\"`** | **\"1050\"** | \n| **`\"hello\" + \"009\"`** | **\"hello009\"** | \n| **`\"hello99\" + \"world66\" `** | **\"hello99world66\"** | \n\n>**Note:** Both the operands passed for concatenation must be of same type, else it will show an error.",
"_____no_output_____"
]
],
[
[
"# Example:\n\nprint(\"HelloWorld\"+99)",
"_____no_output_____"
]
],
[
[
"#### Python String Replication Operator (**`*`**)\n\n**Replication operator** uses two parameters for operation, One is the integer value and the other one is the String argument.\n\nThe Replication operator is used to **repeat a string** number of times. The string will be repeated the number of times which is given by the **integer value**.\n\n| Expression | Output |\n|:----| :--- |\n| **`\"ArcX\" \\* 2`** | **\"ArcXArcX\"** | \n| **`3 *'5'`** | **\"555\"** | \n| **`'@'* 5 `** | **\"@@@@@\"** | \n\n>**Note:**: We can use Replication operator in any way i.e., int **`*`** string or string **`*`** **`int`**. Both the parameters passed cannot be of same type.",
"_____no_output_____"
]
],
[
[
"# Example:\n\nprint(\"HelloWorld\" * 5)\nprint(3 * \"Python\")",
"HelloWorldHelloWorldHelloWorldHelloWorldHelloWorld\nPythonPythonPython\n"
],
[
"print(\"Hello World! \"*5) #note the space in between 'Hello' and 'World!'",
"Hello World! Hello World! Hello World! Hello World! Hello World! \n"
],
[
"# Python String Operations\nstr1 = 'Hello'\nstr2 ='World!'\n\n# using +\nprint('str1 + str2 = ', str1 + str2)\n\n# using *\nprint('str1 * 3 =', str1 * 3)",
"str1 + str2 = HelloWorld!\nstr1 * 3 = HelloHelloHello\n"
]
],
[
[
"If we want to concatenate strings in different lines, we can use parentheses **`()`**.",
"_____no_output_____"
]
],
[
[
"# two string literals together\n'Hello ''World!'",
"_____no_output_____"
],
[
"# using parentheses\ns = ('Hello '\n 'World')\ns",
"_____no_output_____"
]
],
[
[
"### Iterating Through a string\n\nWe can iterate through a string using a **[for loop](https://github.com/milaan9/03_Python_Flow_Control/blob/main/005_Python_for_Loop.ipynb)**. Here is an example to count the number of 'l's in a string.",
"_____no_output_____"
]
],
[
[
"# Iterating through a string\ncount = 0\nfor letter in 'Hello World':\n if(letter == 'l'):\n count += 1\nprint(count,'letters found')",
"3 letters found\n"
]
],
[
[
"### 2. Python String Membership Operators\n\nMembership Operators are already discussed in the Operators section. Let see with context of String.\n\nThere are two types of Membership operators :\n\n1. **`in`** - \"in\" operator returns true if a character or the entire substring is present in the specified string, otherwise false.\n\n2. **`not in`** - \"not in\" operator returns true if a character or entire substring does not exist in the specified string, otherwise false.",
"_____no_output_____"
]
],
[
[
"# Example:\n\nstr1=\"HelloWorld\"\nstr2=\"Hello\"\nstr3=\"World\"\nstr4=\"Milan\"\n\nprint('Exmple of in operator ::')\nprint(str2 in str1)\nprint(str3 in str1)\nprint(str4 in str1)\nprint()\nprint(str2 not in str1)\nprint(str3 not in str1)\nprint(str4 not in str1)",
"Exmple of in operator ::\nTrue\nTrue\nFalse\n\nFalse\nFalse\nTrue\n"
],
[
">>> 'a' in 'program'\nTrue\n>>> 'at' not in 'battle'\nFalse",
"_____no_output_____"
]
],
[
[
"### 3. Python Relational Operators \n\nAll the comparison (relational) operators i.e., **(<, ><=, >=, ==, !=, <>)** are also applicable for strings. The Strings are compared based on the **ASCII value** or **Unicode**(i.e., dictionary Order).",
"_____no_output_____"
]
],
[
[
"# Example:\n\nprint(\"HelloWorld\"==\"HelloWorld\")\nprint(\"helloWorld\">=\"HelloWorld\")\nprint(\"H\"<\"h\")",
"True\nTrue\nTrue\n"
]
],
[
[
"**Explanation:**\n\nThe ASCII value of a is 97, b is 98, c is 99 and so on. The ASCII value of A is 65, B is 66, C is 67 and so on. The comparison between strings are done on the basis on ASCII value.",
"_____no_output_____"
],
[
"The **`%`** operator is used to format a string inserting the value that comes after. It relies on the string containing a format specifier that identifies where to insert the value. The most common types of format specifiers are:\n\n - **`%s`** -> string\n - **`%d`** -> Integer\n - **`%f`** -> Float\n - **`%o`** -> Octal\n - **`%x`** -> Hexadecimal\n - **`%e`** -> exponential\n \nThese will be very familiar to anyone who has ever written a C or Java program and follow nearly exactly the same rules as the **[printf() function](https://en.wikipedia.org/wiki/Printf_format_string)**.",
"_____no_output_____"
]
],
[
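[
"# Hedged add-on example (not from the original notebook): verifying the\n# dictionary-order comparison described above with ord().\nprint(ord('a'), ord('A'))     # 97 65\nprint('a' > 'A')              # True, because 97 > 65\nprint('apple' < 'banana')     # True: compared character by character",
"_____no_output_____"
],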
[
"print(\"Hello %s\" % string1)\nprint(\"Actual Number = %d\" %19)\nprint(\"Float of the number = %f\" %19)\nprint(\"Octal equivalent of the number = %o\" %19)\nprint(\"Hexadecimal equivalent of the number = %x\" %19)\nprint(\"Exponential equivalent of the number = %e\" %19)",
"Hello World\nActual Number = 19\nFloat of the number = 19.000000\nOctal equivalent of the number = 23\nHexadecimal equivalent of the number = 13\nExponential equivalent of the number = 1.900000e+01\n"
]
],
[
[
"When referring to multiple variables parentheses is used. Values are inserted in the order they appear in the parantheses (more on tuples in the next section)",
"_____no_output_____"
]
],
[
[
"print(\"Hello %s %s. My name is Bond, you can call me %d\" %(string1,string2,99))",
"Hello World !. My name is Bond, you can call me 99\n"
]
],
[
[
"We can also specify the width of the field and the number of decimal places to be used. \nFor example:",
"_____no_output_____"
]
],
[
[
"print('Print width 10: |%10s|'%'x')\nprint('Print width 10: |%-10s|'%'x') # left justified\nprint(\"The number pi = %.1f to 1 decimal places\"%3.1415)\nprint(\"The number pi = %.2f to 2 decimal places\"%3.1415)\nprint(\"More space pi = %10.2f\"%3.1415)\nprint(\"Pad pi with 0 = %010.2f\"%3.1415) # pad with zeros",
"Print width 10: | x|\nPrint width 10: |x |\nThe number pi = 3.1 to 1 decimal places\nThe number pi = 3.14 to 2 decimal places\nMore space pi = 3.14\nPad pi with 0 = 0000003.14\n"
]
],
[
[
"### Built-in functions to Work with Python\n\nVarious built-in functions that work with sequence work with strings as well.\n\nSome of the commonly used ones are **`enumerate()`** and **`len()`**. The **[enumerate()](https://github.com/milaan9/04_Python_Functions/blob/main/002_Python_Functions_Built_in/018_Python_enumerate%28%29.ipynb)** function returns an enumerate object. It contains the index and value of all the items in the string as pairs. This can be useful for iteration.\n\nSimilarly, **[len()](https://github.com/milaan9/04_Python_Functions/blob/main/002_Python_Functions_Built_in/040_Python_len%28%29.ipynb)** returns the length (number of characters) of the string.",
"_____no_output_____"
]
],
[
[
"str = 'cold'\n\n# enumerate()\nlist_enumerate = list(enumerate(str))\nprint('list(enumerate(str) = ', list_enumerate)\n\n#character count\nprint('len(str) = ', len(str))",
"list(enumerate(str) = [(0, 'c'), (1, 'o'), (2, 'l'), (3, 'd')]\nlen(str) = 4\n"
]
],
[
[
"## Python String Formatting",
"_____no_output_____"
],
[
"### Escape Sequence\n\nIf we want to print a text like `He said, \"What's there?\"`, we can neither use single quotes nor double quotes. This will result in a SyntaxError as the text itself contains both single and double quotes.",
"_____no_output_____"
]
],
[
[
"print(\"He said, \"What's there?\"\")",
"_____no_output_____"
]
],
[
[
"One way to get around this problem is to use triple quotes. Alternatively, we can use escape sequences.\n\nAn escape sequence starts with a backslash and is interpreted differently. If we use a single quote to represent a string, all the single quotes inside the string must be escaped. Similar is the case with double quotes. Here is how it can be done to represent the above text.",
"_____no_output_____"
]
],
[
[
"# using triple quotes\nprint('''He said, \"What's there?\"''')\n\n# escaping single quotes\nprint('He said, \"What\\'s there?\"')\n\n# escaping double quotes\nprint(\"He said, \\\"What's there?\\\"\")",
"He said, \"What's there?\"\nHe said, \"What's there?\"\nHe said, \"What's there?\"\n"
]
],
[
[
"### Here is a list of all the escape sequences supported by Python.\n\n| Escape Sequence | Description |\n|:----:| :--- |\n| **`\\newline`** | Backslash and newline ignored | \n| **`\\\\`** | Backslash | \n| **`\\'`** | Single quote | \n| **`\\\"`** | Double quote | \n| **`\\a`** | ASCII Bell | \n| **`\\b`** | ASCII Backspace | \n| **`\\f`** | ASCII Formfeed | \n| **`\\n`** | ASCII Linefeed | \n| **`\\r`** | ASCII Carriage Return |\n| **`\\t`** | ASCII Horizontal Tab | \n| **`\\v`** | ASCII Vertical Tab | \n| **`\\ooo`** | Character with octal value ooo | \n| **`\\xHH`** | Character with hexadecimal value HH | ",
"_____no_output_____"
]
],
[
[
"# Here are some examples\n\nprint(\"C:\\\\Python32\\\\Lib\")\n#C:\\Python32\\Lib\n\nprint(\"This is printed\\nin two lines\")\n#This is printed\n#in two lines\n\nprint(\"This is \\x48\\x45\\x58 representation\")\n#This is HEX representation",
"C:\\Python32\\Lib\nThis is printed\nin two lines\nThis is HEX representation\n"
]
],
[
[
"### Raw String to ignore escape sequence\n\nSometimes we may wish to ignore the escape sequences inside a string. To do this we can place **`r`** or **`R`** in front of the string. This will imply that it is a raw string and any escape sequence inside it will be ignored.",
"_____no_output_____"
]
],
[
[
"print(\"This is \\x61 \\ngood example\")",
"This is a \ngood example\n"
],
[
"print(r\"This is \\x61 \\ngood example\")",
"This is \\x61 \\ngood example\n"
]
],
[
[
"### The `format()` Method for Formatting Strings\n\nThe **`format()`** method that is available with the string object is very versatile and powerful in formatting strings. Format strings contain curly braces **`{}`** as placeholders or replacement fields which get replaced.\n\nWe can use positional arguments or keyword arguments to specify the order.",
"_____no_output_____"
]
],
[
[
"# Python string format() method\n\n# default(implicit) order\ndefault_order = \"{}, {} and {}\".format('Allan','Bill','Cory')\nprint('\\n--- Default Order ---')\nprint(default_order)\n\n# order using positional argument\npositional_order = \"{1}, {0} and {2}\".format('Allan','Bill','Cory')\nprint('\\n--- Positional Order ---')\nprint(positional_order)\n\n# order using keyword argument\nkeyword_order = \"{s}, {b} and {j}\".format(j='Allan',b='Bill',s='Cory')\nprint('\\n--- Keyword Order ---')\nprint(keyword_order)",
"\n--- Default Order ---\nAllan, Bill and Cory\n\n--- Positional Order ---\nBill, Allan and Cory\n\n--- Keyword Order ---\nCory, Bill and Allan\n"
]
],
[
[
"The **`format()`** method can have optional format specifications. They are separated from the field name using colon. For example, we can left-justify **`<`**, right-justify **`>`** or center **`^`** a string in the given space.\n\nWe can also format integers as binary, hexadecimal, etc. and floats can be rounded or displayed in the exponent format. There are tons of formatting you can use. Visit here for all the **[string formatting available with the format()](https://github.com/milaan9/02_Python_Datatypes/blob/main/002_Python_String_Methods/009_Python_String_format%28%29.ipynb)** method.",
"_____no_output_____"
]
],
[
[
"# formatting integers\n\"Binary representation of {0} is {0:b}\".format(12)",
"_____no_output_____"
],
[
"# formatting floats\n\"Exponent representation: {0:e}\".format(1966.365)",
"_____no_output_____"
],
[
"# round off\n\"One third is: {0:.3f}\".format(1/3)",
"_____no_output_____"
],
[
"# string alignment\n\"|{:<10}|{:^10}|{:>10}|\".format('bread','butter','jam')",
"_____no_output_____"
]
],
[
[
"### Old style formatting\n\nWe can even format strings like the old **`sprintf()`** style used in C programming language. We use the **`%`** operator to accomplish this.",
"_____no_output_____"
]
],
[
[
"x = 36.3456789\nprint('The value of x is %3.2f' %x)",
"The value of x is 36.35\n"
],
[
"print('The value of x is %3.4f' %x)",
"The value of x is 36.3457\n"
]
],
[
[
"## Common Python String Methods\n\nThere are numerous methods available with the string object. The **`format()`** method that we mentioned above is one of them. \n\nStrings can be tranformed by a variety of functions that are all methods on a string. That is they are called by putting the function name with a **`.`** after the string. They include:\n\n* Upper vs lower case: **`upper()`**, **`lower()`**, **`captialize()`**, **`title()`** and **`swapcase()`**, **`join()`**, **`split()`**, **`find()`**, **`replace()`** etc, with mostly the obvious meaning. Note that `capitalize` makes the first letter of the string a capital only, while **`title`** selects upper case for the first letter of every word.\n\n\n* Padding strings: **`center(n)`**, **`ljust(n)`** and **`rjust(n)`** each place the string into a longer string of length n padded by spaces (centered, left-justified or right-justified respectively). **`zfill(n)`** works similarly but pads with leading zeros.\n\n\n* Stripping strings: Often we want to remove spaces, this is achived with the functions **`strip()`**, **`lstrip()`**, and **`rstrip()`** respectively to remove from spaces from the both end, just left or just the right respectively. An optional argument can be used to list a set of other characters to be removed.\n\nHere is a complete list of all the **[built-in methods to work with Strings in Python](https://github.com/milaan9/02_Python_Datatypes/tree/main/002_Python_String_Methods)**.",
"_____no_output_____"
]
],
[
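[
"# Hedged add-on example (not from the original notebook): the padding helpers\n# mentioned above. ljust/rjust/center pad with spaces (or a fill character),\n# while zfill pads with leading zeros.\nprint('|' + 'hi'.ljust(6) + '|')        # |hi    |\nprint('|' + 'hi'.rjust(6) + '|')        # |    hi|\nprint('|' + 'hi'.center(6, '*') + '|')  # |**hi**|\nprint('42'.zfill(6))                    # 000042",
"_____no_output_____"
],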
[
"# Example:\n\ns=\"heLLo wORLd!\"\nprint(s.capitalize(),\"vs\",s.title())\n\nprint(\"upper case: '%s'\"%s.upper(),\"lower case: '%s'\"%s.lower(),\"and swapped: '%s'\"%s.swapcase())\n\nprint('|%s|' % \"Hello World\".center(30)) # center in 30 characters\n\nprint('|%s|'% \" lots of space \".strip()) # remove leading and trailing whitespace\n\nprint('%s without leading/trailing d,h,L or ! = |%s|',s.strip(\"dhL!\"))\n\nprint(\"Hello World\".replace(\"World\",\"Class\"))",
"Hello world! vs Hello World!\nupper case: 'HELLO WORLD!' lower case: 'hello world!' and swapped: 'HEllO WorlD!'\n| Hello World |\n|lots of space|\n%s without leading/trailing d,h,L or ! = |%s| eLLo wOR\nHello Class\n"
]
],
[
[
"#### Inspecting Strings\n\nThere are also lost of ways to inspect or check strings. Examples of a few of these are given here:\n\n* Checking the start or end of a string: **`startswith(\"string\")`** and **`endswith(\"string\")`** checks if it starts/ends with the string given as argument\n\n* Capitalisation: There are boolean counterparts for all forms of capitalisation, such as **`isupper()`**, **`islower()`** and **`istitle()`**\n\n* Character type: does the string only contain the characters:\n * 0-9: **`isdecimal()`**. Note there is also **`isnumeric()`** and **`isdigit()`** which are effectively the same function except for certain unicode characters\n * a-zA-Z: **`isalpha()`** or combined with digits: **`isalnum()`**\n * non-control code: **`isprintable()`** accepts anything except '\\n' an other ASCII control codes\n * \\t\\n \\r (white space characters): **`isspace()`**\n * Suitable as variable name: **`isidentifier()`**\n \n* Find elements of string: **`s.count(w)`** finds the number of times **`w`** occurs in **`s`**, while **`s.find(w)`** and **`s.rfind(w)`** find the first and last position of the string **`w`** in **`s`**.",
"_____no_output_____"
]
],
[
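[
"# Hedged add-on example (not from the original notebook): a few of the boolean\n# inspection methods listed above.\nprint('HELLO'.isupper(), 'hello'.islower(), 'Hello World'.istitle())  # True True True\nprint('123'.isdecimal(), 'abc'.isalpha(), 'abc123'.isalnum())         # True True True\nprint(' \\t\\n'.isspace(), 'my_var'.isidentifier())                     # True True",
"_____no_output_____"
],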
[
"# Example:\n\ns=\"Hello World\"\nprint(\"The length of '%s' is\"%s,len(s),\"characters\") # len() gives length of the string\n\ns.startswith(\"Hello\") and s.endswith(\"World\") # check start/end\n\n# count strings\nprint(\"There are %d 'l's but only %d World in %s\" % (s.count('l'),s.count('World'),s))\n\nprint('\"el\" is at index',s.find('el'),\"in\",s) #index from 0 or -1",
"The length of 'Hello World' is 11 characters\nThere are 3 'l's but only 1 World in Hello World\n\"el\" is at index 1 in Hello World\n"
]
],
[
[
"## Advanced string processing\nFor more advanced string processing there are many libraries available in Python including for example:\n* **re** for regular expression based searching and splitting of strings\n* **html** for manipulating HTML format text\n* **textwrap** for reformatting ASCII text\n* ... and many more",
"_____no_output_____"
]
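],
[
[
"# Hedged add-on example (not from the original notebook): tiny tastes of two of\n# the libraries listed above.\nimport re\nimport textwrap\n\n# re: split on runs of non-word characters\nprint(re.split(r'\\W+', 'one, two;three'))  # ['one', 'two', 'three']\n\n# textwrap: reflow a long line to a fixed width\nprint(textwrap.fill('a few words repeated ' * 4, width=30))",
"_____no_output_____"
]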
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a182a2bafeeba0c96499b98c7faa1107b007409
| 651,988 |
ipynb
|
Jupyter Notebook
|
intro-to-pytorch/Part 7 - Loading Image Data (Exercises).ipynb
|
NGrech/deep-learning-v2-pytorch
|
02c11938034429975d6e22bb4bedef1981f9a90f
|
[
"MIT"
] | null | null | null |
intro-to-pytorch/Part 7 - Loading Image Data (Exercises).ipynb
|
NGrech/deep-learning-v2-pytorch
|
02c11938034429975d6e22bb4bedef1981f9a90f
|
[
"MIT"
] | null | null | null |
intro-to-pytorch/Part 7 - Loading Image Data (Exercises).ipynb
|
NGrech/deep-learning-v2-pytorch
|
02c11938034429975d6e22bb4bedef1981f9a90f
|
[
"MIT"
] | null | null | null | 2,217.646259 | 398,040 | 0.960703 |
[
[
[
"# Loading Image Data\n\nSo far we've been working with fairly artificial datasets that you wouldn't typically be using in real projects. Instead, you'll likely be dealing with full-sized images like you'd get from smart phone cameras. In this notebook, we'll look at how to load images and use them to train neural networks.\n\nWe'll be using a [dataset of cat and dog photos](https://www.kaggle.com/c/dogs-vs-cats) available from Kaggle. Here are a couple example images:\n\n<img src='assets/dog_cat.png'>\n\nWe'll use this dataset to train a neural network that can differentiate between cats and dogs. These days it doesn't seem like a big accomplishment, but five years ago it was a serious challenge for computer vision systems.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport matplotlib.pyplot as plt\n\nimport torch\nfrom torchvision import datasets, transforms\n\nimport helper",
"_____no_output_____"
]
],
[
[
"The easiest way to load image data is with `datasets.ImageFolder` from `torchvision` ([documentation](http://pytorch.org/docs/master/torchvision/datasets.html#imagefolder)). In general you'll use `ImageFolder` like so:\n\n```python\ndataset = datasets.ImageFolder('path/to/data', transform=transform)\n```\n\nwhere `'path/to/data'` is the file path to the data directory and `transform` is a list of processing steps built with the [`transforms`](http://pytorch.org/docs/master/torchvision/transforms.html) module from `torchvision`. ImageFolder expects the files and directories to be constructed like so:\n```\nroot/dog/xxx.png\nroot/dog/xxy.png\nroot/dog/xxz.png\n\nroot/cat/123.png\nroot/cat/nsdf3.png\nroot/cat/asd932_.png\n```\n\nwhere each class has it's own directory (`cat` and `dog`) for the images. The images are then labeled with the class taken from the directory name. So here, the image `123.png` would be loaded with the class label `cat`. You can download the dataset already structured like this [from here](https://s3.amazonaws.com/content.udacity-data.com/nd089/Cat_Dog_data.zip). I've also split it into a training set and test set.\n\n### Transforms\n\nWhen you load in the data with `ImageFolder`, you'll need to define some transforms. For example, the images are different sizes but we'll need them to all be the same size for training. You can either resize them with `transforms.Resize()` or crop with `transforms.CenterCrop()`, `transforms.RandomResizedCrop()`, etc. We'll also need to convert the images to PyTorch tensors with `transforms.ToTensor()`. Typically you'll combine these transforms into a pipeline with `transforms.Compose()`, which accepts a list of transforms and runs them in sequence. It looks something like this to scale, then crop, then convert to a tensor:\n\n```python\ntransform = transforms.Compose([transforms.Resize(255),\n transforms.CenterCrop(224),\n transforms.ToTensor()])\n\n```\n\nThere are plenty of transforms available, I'll cover more in a bit and you can read through the [documentation](http://pytorch.org/docs/master/torchvision/transforms.html). \n\n### Data Loaders\n\nWith the `ImageFolder` loaded, you have to pass it to a [`DataLoader`](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader). The `DataLoader` takes a dataset (such as you would get from `ImageFolder`) and returns batches of images and the corresponding labels. You can set various parameters like the batch size and if the data is shuffled after each epoch.\n\n```python\ndataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)\n```\n\nHere `dataloader` is a [generator](https://jeffknupp.com/blog/2013/04/07/improve-your-python-yield-and-generators-explained/). To get data out of it, you need to loop through it or convert it to an iterator and call `next()`.\n\n```python\n# Looping through it, get a batch on each loop \nfor images, labels in dataloader:\n pass\n\n# Get one batch\nimages, labels = next(iter(dataloader))\n```\n \n>**Exercise:** Load images from the `Cat_Dog_data/train` folder, define a few transforms, then build the dataloader.",
"_____no_output_____"
]
],
[
[
"data_dir = 'Cat_Dog_data/train'\n\ntransform = transforms.Compose([\n transforms.Resize(255),\n transforms.CenterCrop(224),\n transforms.ToTensor()\n])\ndataset = datasets.ImageFolder(data_dir, transform)\ndataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)",
"_____no_output_____"
],
[
"# Run this to test your data loader\nimages, labels = next(iter(dataloader))\nhelper.imshow(images[0], normalize=False)",
"_____no_output_____"
]
],
[
[
"If you loaded the data correctly, you should see something like this (your image will be different):\n\n<img src='assets/cat_cropped.png' width=244>",
"_____no_output_____"
],
[
"## Data Augmentation\n\nA common strategy for training neural networks is to introduce randomness in the input data itself. For example, you can randomly rotate, mirror, scale, and/or crop your images during training. This will help your network generalize as it's seeing the same images but in different locations, with different sizes, in different orientations, etc.\n\nTo randomly rotate, scale and crop, then flip your images you would define your transforms like this:\n\n```python\ntrain_transforms = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.5, 0.5, 0.5], \n [0.5, 0.5, 0.5])])\n```\n\nYou'll also typically want to normalize images with `transforms.Normalize`. You pass in a list of means and list of standard deviations, then the color channels are normalized like so\n\n```input[channel] = (input[channel] - mean[channel]) / std[channel]```\n\nSubtracting `mean` centers the data around zero and dividing by `std` squishes the values to be between -1 and 1. Normalizing helps keep the network work weights near zero which in turn makes backpropagation more stable. Without normalization, networks will tend to fail to learn.\n\nYou can find a list of all [the available transforms here](http://pytorch.org/docs/0.3.0/torchvision/transforms.html). When you're testing however, you'll want to use images that aren't altered (except you'll need to normalize the same way). So, for validation/test images, you'll typically just resize and crop.\n\n>**Exercise:** Define transforms for training data and testing data below. Leave off normalization for now.",
"_____no_output_____"
]
],
[
[
"data_dir = 'Cat_Dog_data'\n\n# TODO: Define transforms for the training data and testing data\ntrain_transforms = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor()])\n\ntest_transforms = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.ToTensor()])\n\n\n# Pass transforms in here, then run the next cell to see how the transforms look\ntrain_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms)\ntest_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms)\n\ntrainloader = torch.utils.data.DataLoader(train_data, batch_size=32)\ntestloader = torch.utils.data.DataLoader(test_data, batch_size=32)",
"_____no_output_____"
],
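[
"# Hedged add-on (not part of the original exercise): a quick numeric check of the\n# normalization formula quoted above, input = (input - mean) / std per channel.\nnormalize = transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\nimg = torch.rand(3, 4, 4)                    # fake 3-channel image in [0, 1]\nout = normalize(img)\nprint(img.min().item(), img.max().item())    # roughly within [0, 1]\nprint(out.min().item(), out.max().item())    # roughly within [-1, 1]",
"_____no_output_____"
],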
[
"# change this to the trainloader or testloader \ndata_iter = iter(testloader)\n\nimages, labels = next(data_iter)\nfig, axes = plt.subplots(figsize=(10,4), ncols=4)\nfor ii in range(4):\n ax = axes[ii]\n helper.imshow(images[ii], ax=ax, normalize=False)",
"_____no_output_____"
]
],
[
[
"Your transformed images should look something like this.\n\n<center>Training examples:</center>\n<img src='assets/train_examples.png' width=500px>\n\n<center>Testing examples:</center>\n<img src='assets/test_examples.png' width=500px>",
"_____no_output_____"
],
[
"At this point you should be able to load data for training and testing. Now, you should try building a network that can classify cats vs dogs. This is quite a bit more complicated than before with the MNIST and Fashion-MNIST datasets. To be honest, you probably won't get it to work with a fully-connected network, no matter how deep. These images have three color channels and at a higher resolution (so far you've seen 28x28 images which are tiny).\n\nIn the next part, I'll show you how to use a pre-trained network to build a model that can actually solve this problem.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
4a183fc11927e4a665627e48b33428d397209111
| 5,887 |
ipynb
|
Jupyter Notebook
|
Exps/utils/.ipynb_checkpoints/Untitled-checkpoint.ipynb
|
Gwzwpxz/OnlineLP
|
a12bea25c8a4ddeb9317badc72a09afd50b60ef1
|
[
"MIT"
] | null | null | null |
Exps/utils/.ipynb_checkpoints/Untitled-checkpoint.ipynb
|
Gwzwpxz/OnlineLP
|
a12bea25c8a4ddeb9317badc72a09afd50b60ef1
|
[
"MIT"
] | null | null | null |
Exps/utils/.ipynb_checkpoints/Untitled-checkpoint.ipynb
|
Gwzwpxz/OnlineLP
|
a12bea25c8a4ddeb9317badc72a09afd50b60ef1
|
[
"MIT"
] | null | null | null | 27.900474 | 94 | 0.464583 |
[
[
[
"## Utility function test",
"_____no_output_____"
],
[
"This notebook is for test of utility functions",
"_____no_output_____"
]
],
[
[
"# Import dependencies\nimport numpy as np\nimport scipy.sparse\nfrom scipy.io import savemat, loadmat\nfrom gurobipy import *",
"_____no_output_____"
]
],
[
[
"#### Online Algorithm",
"_____no_output_____"
]
],
[
[
"def fastLP(A, b, c, K, Method):\n m = A.shape[0]\n n = A.shape[1]\n \n # It is worth considerinvg whether it is better to exclude K here\n # stepsize = 1 / np.sqrt(n * K)\n \n # Initialize dual solution\n if Method == \"M\":\n y = np.ones((m, 1)) / np.exp(1)\n else:\n y = np.zeros((m, 1))\n \n # Initialize resource\n d = b / n\n \n # Initialize primal solution\n x = np.zeros((n, 1))\n \n # Start dual descent\n for i in range(K):\n \n p = np.random.permutation(n)\n \n for j in p:\n \n stepsize = 1 / np.sqrt(n * (i + 1))\n aa = A[:, j].reshape(m, 1)\n xk = (c[j] > np.dot(aa.T, y))\n \n if Method == \"M\":\n y = np.multiply(y, np.exp(- stepsize * (d - aa * xk)))\n else:\n y = y - stepsize * (d - aa * xk)\n y = np.maximum(y, 0.0)\n \n x[j] += xk[0][0]\n \n obj = np.dot(c.T, x / K)\n \n return {\"x\": x / K, \"y\": y, \"obj\": obj}",
"_____no_output_____"
],
[
"def GRBLP(A, b, c):\n \n model = Model()\n x = model.addMVar(n, lb=0.0, ub=1.0, vtype=GRB.CONTINUOUS)\n constr = model.addMConstrs(A, x, GRB.LESS_EQUAL, b.squeeze())\n model.setMObjective(Q=None, c=c.squeeze(), constant=0.0, sense=GRB.MAXIMIZE)\n model.update()\n model.optimize()\n optdual = model.getAttr(GRB.Attr.Pi, model.getConstrs())\n optx = model.getAttr(GRB.Attr.X, model.getVars())\n time = model.getAttr(GRB.Attr.Runtime)\n obj = model.getAttr(GRB.Attr.ObjVal)\n \n return {\"x\": optx, \"y\": optdual, \"time\": time, \"model\": model, \"obj\": obj}",
"_____no_output_____"
],
[
"def GRBMIP(A, b, c):\n \n model = Model()\n x = model.addMVar(n, vtype=GRB.BINARY)\n constr = model.addMConstrs(A, x, GRB.LESS_EQUAL, b.squeeze())\n model.setMObjective(Q=None, c=c.squeeze(), constant=0.0, sense=GRB.MAXIMIZE)\n model.update()\n model.optimize()\n optdual = model.getAttr(GRB.Attr.Pi, model.getConstrs())\n optx = model.getAttr(GRB.Attr.X, model.getVars())\n time = model.getAttr(GRB.Attr.Runtime)\n obj = model.getAttr(GRB.Attr.ObjVal)\n \n return {\"x\": optx, \"y\": optdual, \"time\": time, \"model\": model, \"obj\": obj}",
"_____no_output_____"
],
[
"# Test of online algorithm\nm = 5\nn = 100\n\nA = np.random.randint(1, 1000, (m, n)) / 100\nb = np.sum(A, axis=1).reshape(m, 1) * 0.25\nc = np.sum(A, axis=0).reshape(n, 1) / m + np.random.rand(n, 1) * 5\n",
"_____no_output_____"
],
[
"res = fastLP(A, b, c, 1, \"S\")\ngres = GRBLP(A, b, c)",
"Gurobi Optimizer version 9.0.3 build v9.0.3rc0 (win64)\nOptimize a model with 5 rows, 100 columns and 500 nonzeros\nModel fingerprint: 0xddd873a4\nCoefficient statistics:\n Matrix range [3e-02, 1e+01]\n Objective range [3e+00, 1e+01]\n Bounds range [1e+00, 1e+00]\n RHS range [1e+02, 1e+02]\nPresolve time: 0.00s\nPresolved: 5 rows, 100 columns, 500 nonzeros\n\nIteration Objective Primal Inf. Dual Inf. Time\n 0 5.8584067e+03 4.513098e+03 0.000000e+00 0s\n 20 2.4471236e+02 0.000000e+00 0.000000e+00 0s\n\nSolved in 20 iterations and 0.01 seconds\nOptimal objective 2.447123646e+02\n"
],
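[
"# Hedged sketch (not in the original checkpoint): one plausible way the unfinished\n# sweep below could compare the online dual-descent objective against Gurobi's\n# optimum as the number of variables n grows.\nfor n in [10, 100, 1000]:\n    A = np.random.randint(1, 1000, (m, n)) / 100\n    b = np.sum(A, axis=1).reshape(m, 1) * 0.25\n    c = np.sum(A, axis=0).reshape(n, 1) / m + np.random.rand(n, 1) * 5\n    res = fastLP(A, b, c, 1, \"S\")\n    gres = GRBLP(A, b, c)\n    print(n, res[\"obj\"][0][0] / gres[\"obj\"])",
"_____no_output_____"
],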
[
"for n in [10, 100, 1000, 10000]",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a18405e229de211353c162cdcc3cf48f2c84ed7
| 90,812 |
ipynb
|
Jupyter Notebook
|
Private GES Behavior.ipynb
|
paula-gradu/ges
|
c0ca3f535982166e01d0d6247aa5b8a6972ba886
|
[
"BSD-3-Clause"
] | null | null | null |
Private GES Behavior.ipynb
|
paula-gradu/ges
|
c0ca3f535982166e01d0d6247aa5b8a6972ba886
|
[
"BSD-3-Clause"
] | null | null | null |
Private GES Behavior.ipynb
|
paula-gradu/ges
|
c0ca3f535982166e01d0d6247aa5b8a6972ba886
|
[
"BSD-3-Clause"
] | null | null | null | 161.300178 | 24,028 | 0.747049 |
[
[
[
"%load_ext autoreload\n%autoreload 2 ",
"_____no_output_____"
],
[
"import ges\nimport sempler\nimport numpy as np\nimport scipy.stats as st\nfrom ges.scores.gauss_obs_l0_pen import GaussObsL0Pen\nfrom ges.scores.general import GeneralScore",
"_____no_output_____"
]
],
[
[
"## Find Causal Graph and get confidence interval [one trial]",
"_____no_output_____"
]
],
[
[
"d = 20 # of attributes\nn = 500 # of datapoints\n\nmu_lb, mu_ub = 0, 10 # range for means of the d components\nsig_lb, sig_ub = 0, 10 # range for means of the variance components",
"_____no_output_____"
]
],
[
[
"## useful fns",
"_____no_output_____"
]
],
[
[
"def get_parents(x, G):\n parents = []\n for i in range(G.shape[0]):\n if(G[i, x] == 1):\n parents.append(i)\n return parents",
"_____no_output_____"
],
[
"def get_all_family(x, G):\n visited = np.zeros(G.shape[0])\n visited[x] = 1\n \n x_parents = get_parents(x, G)\n to_search = x_parents\n reach_from_x = []\n \n while len(to_search):\n to_search_new = []\n \n for y in to_search:\n if(visited[y]):\n continue\n else:\n visited[y] = 1\n \n y_parents = get_parents(y, G)\n to_search_new += y_parents\n reach_from_x.append(y)\n \n to_search = to_search_new\n \n return reach_from_x",
"_____no_output_____"
]
],
[
[
"## Experiment Definition (assume n >= 30)",
"_____no_output_____"
]
],
[
[
"def get_conf_interval(a, b, conf_lvl=.95):\n effect_size, resid, _, _ = np.linalg.lstsq(a, b, rcond=None)\n sq_tot_dev = sum([(a_i - np.mean(a))**2 for a_i in a])\n SE = np.sqrt(resid / ((n-2) * sq_tot_dev))\n conf = st.norm.ppf(conf_lvl) * SE\n return (effect_size[0] - conf[0], effect_size[0] + conf[0])",
"_____no_output_____"
],
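[
"# Hedged add-on (not in the original): a quick sanity check of get_conf_interval\n# on synthetic data where the true effect size is 2, so the returned interval\n# should cover 2 (uses the global n defined above).\na_demo = np.random.randn(n, 1)\nb_demo = 2 * a_demo[:, 0] + 0.1 * np.random.randn(n)\nprint(get_conf_interval(a_demo, b_demo))",
"_____no_output_____"
],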
[
"def experiment(d=10, n=500, trials=30, eps_noisy_max=0, eps_abv_thrsh=0, mu_range=(0, 10), sig_range=(1,1)):\n success = 0\n for trial in range(trials):\n # start from empty causal graph, generate data & fit causal graph\n G = np.zeros((d, d))\n data = sempler.LGANM(G, mu_range, sig_range).sample(n=n)\n estimate, score = ges.fit(GeneralScore(data), eps_noisy_max=eps_noisy_max, \\\n eps_abv_thrsh=eps_abv_thrsh, max_iter=1)\n if(len(np.where(estimate>0)[0]) == 0): # GES found empty graph so it is correct and we stop early\n success += 1\n continue\n\n # o/w choose arbirary edge & find confidence interval of effect size\n connections = np.where(estimate>0)\n #idx = np.random.randint(0, len(connections[0]))\n \n for idx in range(len(connections)):\n ## check if needs backdoor adj\n backdoor = [x for x in get_all_family(connections[0][idx], estimate) \\\n if x in get_all_family(connections[1][idx], estimate)]\n if(len(backdoor) == 0):\n break\n\n A = data[:, connections[0][idx]].reshape((n,1))\n for node in backdoor:\n A = np.column_stack((A, data[:, node]))\n b = data[:, connections[1][idx]]\n \n (conf_lb, conf_ub) = get_conf_interval(A, b)\n\n # check if 0 is in the interval\n if(conf_lb <= 0 and 0 <= conf_ub):\n success+=1\n \n return success / trials",
"_____no_output_____"
],
[
"results = {}",
"_____no_output_____"
],
[
"for noise_lvl in [0, 100, 200, 400, 800]:\n for d in [2]:\n for n in range(10, 901, 200):\n results[(noise_lvl,d,n)] = []\n for seed in range(5):\n results[(noise_lvl,d,n)].append(experiment(d=d, n=n, eps_noisy_max=noise_lvl/n, \\\n eps_abv_thrsh=noise_lvl/n))\n\n print(\"noise_lvl=\", noise_lvl, \"d=\",d, \", n=\", n,\" results:\", results[(noise_lvl,d,n)])",
"noise_lvl= 0 d= 2 , n= 10 results: [1.0, 1.0, 1.0, 1.0, 1.0]\nnoise_lvl= 0 d= 2 , n= 210 results: [1.0, 1.0, 1.0, 1.0, 1.0]\nnoise_lvl= 0 d= 2 , n= 410 results: [1.0, 1.0, 1.0, 1.0, 1.0]\nnoise_lvl= 0 d= 2 , n= 610 results: [1.0, 1.0, 1.0, 1.0, 1.0]\nnoise_lvl= 0 d= 2 , n= 810 results: [1.0, 1.0, 1.0, 1.0, 1.0]\nnoise_lvl= 100 d= 2 , n= 10 results: [0.8666666666666667, 0.8666666666666667, 0.9333333333333333, 0.9333333333333333, 0.9]\nnoise_lvl= 100 d= 2 , n= 210 results: [0.9333333333333333, 0.9666666666666667, 0.9333333333333333, 0.9, 0.9666666666666667]\nnoise_lvl= 100 d= 2 , n= 410 results: [1.0, 0.9, 0.9333333333333333, 1.0, 0.9666666666666667]\n"
],
[
"import statistics as stats\nimport matplotlib.pyplot as plt ",
"_____no_output_____"
],
[
"results_per_d_mean = {}\n\nresults_mean = np.zeros((1001, 50, 901))\nresults_CI = np.zeros((1001, 50, 901))\n\nfor noise_lvl in [0, 100, 200, 400, 800]:\n for d in [15]:\n for n in range(100, 901, 200):\n results_mean[noise_lvl, d, n] = np.mean(results[(noise_lvl,d,n)]) \n results_CI[noise_lvl, d, n] = 0.878 * stats.stdev(results[(noise_lvl,d,n)])",
"_____no_output_____"
],
[
"d=15",
"_____no_output_____"
],
[
"plt.plot(range(100,901, 200), results_mean[0,d,100::200], 'b-', label=\"noise=\"+str(0))\n#plt.plot(range(100,901, 200), results_mean[0,10,100::200] - results_CI[0,10,100::200], 'b--')\n#plt.plot(range(100,901, 200), results_mean[0,10,100::200] + results_CI[0,10,100::200], 'b--')\n\nplt.plot(range(100,901, 200), results_mean[100,d,100::200], 'c-', label=\"noise=\"+str(100))\n#plt.plot(range(100,901, 200), results_mean[1,10,100::200] - results_CI[1,10,100::200], 'c--')\n#plt.plot(range(100,901, 200), results_mean[1,10,100::200] + results_CI[1,10,100::200], 'c--')\n\nplt.plot(range(100,901, 200), results_mean[200,d,100::200], 'g-', label=\"noise=\"+str(200))\n#plt.plot(range(100,901, 200), results_mean[10,10,100::200] - results_CI[10,10,100::200], 'g--')\n#plt.plot(range(100,901, 200), results_mean[10,10,100::200] + results_CI[10,10,100::200], 'g--')\n\nplt.plot(range(100,901, 200), results_mean[400,d,100::200], 'y-', label=\"noise=\"+str(400))\n#plt.plot(range(100,901, 200), results_mean[100,10,100::200] - results_CI[100,10,100::200], 'y--')\n#plt.plot(range(100,901, 200), results_mean[100,10,100::200] + results_CI[100,10,100::200], 'y--')\n\nplt.plot(range(100,901, 200), results_mean[800,d,100::200], 'r-', label=\"noise=\"+str(800))\n\nplt.ylim((0,1.))\nplt.xlabel('# datapoints')\nplt.ylabel('success rate')\nplt.legend()\nplt.title(\"d=\" + str(d))\nplt.savefig(\"graph_d=15_comparison.pdf\");",
"_____no_output_____"
],
[
"results = {}\n\nfor noise_lvl in [1., 2., 4., 8., 16., 32.]:\n for d in [10, 15,20]:\n for n in range(100, 901, 200):\n results[(noise_lvl,d,n)] = []\n for seed in range(5):\n results[(noise_lvl,d,n)].append(experiment(d=d, n=n, noise_lvls=(noise_lvl/n, noise_lvl/n)))\n\n print(\"noise_lvl=\", noise_lvl, \"d=\",d, \", n=\", n,\" results:\", results[(noise_lvl,d,n)])",
"noise_lvl= 0.01 d= 10 , n= 100 results: [0.3333333333333333, 0.23333333333333334, 0.3, 0.26666666666666666, 0.26666666666666666]\nnoise_lvl= 0.01 d= 10 , n= 300 results: [0.5, 0.5, 0.5, 0.7333333333333333, 0.36666666666666664]\nnoise_lvl= 0.01 d= 10 , n= 500 results: [0.5, 0.5333333333333333, 0.5666666666666667, 0.6666666666666666, 0.4666666666666667]\nnoise_lvl= 0.01 d= 10 , n= 700 results: [0.5666666666666667, 0.6333333333333333, 0.6666666666666666, 0.5666666666666667, 0.7666666666666667]\nnoise_lvl= 0.01 d= 10 , n= 900 results: [0.7666666666666667, 0.7333333333333333, 0.6, 0.5666666666666667, 0.7333333333333333]\nnoise_lvl= 0.01 d= 15 , n= 100 results: [0.06666666666666667, 0.06666666666666667, 0.2, 0.1, 0.13333333333333333]\nnoise_lvl= 0.01 d= 15 , n= 300 results: [0.3, 0.3, 0.13333333333333333, 0.23333333333333334, 0.26666666666666666]\nnoise_lvl= 0.01 d= 15 , n= 500 results: [0.3333333333333333, 0.4666666666666667, 0.43333333333333335, 0.43333333333333335, 0.4]\nnoise_lvl= 0.01 d= 15 , n= 700 results: [0.4, 0.3, 0.4, 0.5333333333333333, 0.4666666666666667]\nnoise_lvl= 0.01 d= 15 , n= 900 results: [0.43333333333333335, 0.36666666666666664, 0.4, 0.43333333333333335, 0.5]\nnoise_lvl= 0.01 d= 20 , n= 100 results: [0.1, 0.03333333333333333, 0.16666666666666666, 0.13333333333333333, 0.06666666666666667]\nnoise_lvl= 0.01 d= 20 , n= 300 results: [0.06666666666666667, 0.13333333333333333, 0.03333333333333333, 0.1, 0.03333333333333333]\nnoise_lvl= 0.01 d= 20 , n= 500 results: [0.03333333333333333, 0.23333333333333334, 0.1, 0.0, 0.16666666666666666]\nnoise_lvl= 0.01 d= 20 , n= 700 results: [0.1, 0.13333333333333333, 0.1, 0.13333333333333333, 0.23333333333333334]\nnoise_lvl= 0.01 d= 20 , n= 900 results: [0.16666666666666666, 0.23333333333333334, 0.16666666666666666, 0.2, 0.26666666666666666]\nnoise_lvl= 0.1 d= 10 , n= 100 results: [0.3, 0.3333333333333333, 0.4, 0.26666666666666666, 0.4]\nnoise_lvl= 0.1 d= 10 , n= 300 results: [0.5333333333333333, 0.5, 0.6, 0.4666666666666667, 0.3]\nnoise_lvl= 0.1 d= 10 , n= 500 results: [0.5333333333333333, 0.5333333333333333, 0.4, 0.6333333333333333, 0.5666666666666667]\nnoise_lvl= 0.1 d= 10 , n= 700 results: [0.4666666666666667, 0.5333333333333333, 0.5666666666666667, 0.6333333333333333, 0.6666666666666666]\nnoise_lvl= 0.1 d= 10 , n= 900 results: [0.5333333333333333, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.7]\nnoise_lvl= 0.1 d= 15 , n= 100 results: [0.2, 0.16666666666666666, 0.13333333333333333, 0.13333333333333333, 0.3]\nnoise_lvl= 0.1 d= 15 , n= 300 results: [0.2, 0.2, 0.23333333333333334, 0.26666666666666666, 0.13333333333333333]\nnoise_lvl= 0.1 d= 15 , n= 500 results: [0.26666666666666666, 0.43333333333333335, 0.3333333333333333, 0.2, 0.3333333333333333]\nnoise_lvl= 0.1 d= 15 , n= 700 results: [0.43333333333333335, 0.3333333333333333, 0.43333333333333335, 0.43333333333333335, 0.4666666666666667]\nnoise_lvl= 0.1 d= 15 , n= 900 results: [0.6, 0.5666666666666667, 0.5, 0.36666666666666664, 0.23333333333333334]\nnoise_lvl= 0.1 d= 20 , n= 100 results: [0.1, 0.1, 0.13333333333333333, 0.1, 0.1]\nnoise_lvl= 0.1 d= 20 , n= 300 results: [0.03333333333333333, 0.1, 0.13333333333333333, 0.03333333333333333, 0.03333333333333333]\nnoise_lvl= 0.1 d= 20 , n= 500 results: [0.13333333333333333, 0.13333333333333333, 0.13333333333333333, 0.06666666666666667, 0.13333333333333333]\nnoise_lvl= 0.1 d= 20 , n= 700 results: [0.1, 0.26666666666666666, 0.26666666666666666, 0.26666666666666666, 0.13333333333333333]\nnoise_lvl= 0.1 d= 20 , n= 900 results: 
[0.06666666666666667, 0.13333333333333333, 0.13333333333333333, 0.13333333333333333, 0.16666666666666666]\nnoise_lvl= 0.3 d= 10 , n= 100 results: [0.36666666666666664, 0.2, 0.3333333333333333, 0.26666666666666666, 0.23333333333333334]\nnoise_lvl= 0.3 d= 10 , n= 300 results: [0.4, 0.26666666666666666, 0.43333333333333335, 0.5, 0.6]\nnoise_lvl= 0.3 d= 10 , n= 500 results: [0.7666666666666667, 0.5666666666666667, 0.7666666666666667, 0.7, 0.4666666666666667]\nnoise_lvl= 0.3 d= 10 , n= 700 results: [0.7, 0.6, 0.6333333333333333, 0.5666666666666667, 0.8333333333333334]\nnoise_lvl= 0.3 d= 10 , n= 900 results: [0.6333333333333333, 0.36666666666666664, 0.5666666666666667, 0.7666666666666667, 0.6]\nnoise_lvl= 0.3 d= 15 , n= 100 results: [0.03333333333333333, 0.13333333333333333, 0.23333333333333334, 0.16666666666666666, 0.13333333333333333]\nnoise_lvl= 0.3 d= 15 , n= 300 results: [0.1, 0.3333333333333333, 0.36666666666666664, 0.16666666666666666, 0.3]\nnoise_lvl= 0.3 d= 15 , n= 500 results: [0.26666666666666666, 0.23333333333333334, 0.3333333333333333, 0.3, 0.23333333333333334]\nnoise_lvl= 0.3 d= 15 , n= 700 results: [0.4, 0.26666666666666666, 0.43333333333333335, 0.4666666666666667, 0.2]\nnoise_lvl= 0.3 d= 15 , n= 900 results: [0.4, 0.4, 0.3333333333333333, 0.26666666666666666, 0.43333333333333335]\nnoise_lvl= 0.3 d= 20 , n= 100 results: [0.1, 0.16666666666666666, 0.03333333333333333, 0.1, 0.26666666666666666]\nnoise_lvl= 0.3 d= 20 , n= 300 results: [0.1, 0.16666666666666666, 0.0, 0.2, 0.1]\nnoise_lvl= 0.3 d= 20 , n= 500 results: [0.13333333333333333, 0.06666666666666667, 0.13333333333333333, 0.16666666666666666, 0.13333333333333333]\nnoise_lvl= 0.3 d= 20 , n= 700 results: [0.26666666666666666, 0.1, 0.16666666666666666, 0.1, 0.1]\nnoise_lvl= 0.3 d= 20 , n= 900 results: [0.2, 0.2, 0.2, 0.2, 0.2]\nnoise_lvl= 0.5 d= 10 , n= 100 results: [0.3333333333333333, 0.3, 0.2, 0.26666666666666666, 0.36666666666666664]\nnoise_lvl= 0.5 d= 10 , n= 300 results: [0.36666666666666664, 0.4, 0.4666666666666667, 0.5666666666666667, 0.4666666666666667]\nnoise_lvl= 0.5 d= 10 , n= 500 results: [0.7666666666666667, 0.43333333333333335, 0.6333333333333333, 0.6333333333333333, 0.5]\nnoise_lvl= 0.5 d= 10 , n= 700 results: [0.5333333333333333, 0.5333333333333333, 0.6, 0.7333333333333333, 0.6]\nnoise_lvl= 0.5 d= 10 , n= 900 results: [0.5333333333333333, 0.7666666666666667, 0.6666666666666666, 0.8333333333333334, 0.7666666666666667]\nnoise_lvl= 0.5 d= 15 , n= 100 results: [0.16666666666666666, 0.03333333333333333, 0.13333333333333333, 0.1, 0.26666666666666666]\nnoise_lvl= 0.5 d= 15 , n= 300 results: [0.2, 0.26666666666666666, 0.2, 0.3333333333333333, 0.3]\nnoise_lvl= 0.5 d= 15 , n= 500 results: [0.16666666666666666, 0.3, 0.16666666666666666, 0.36666666666666664, 0.26666666666666666]\nnoise_lvl= 0.5 d= 15 , n= 700 results: [0.43333333333333335, 0.3333333333333333, 0.36666666666666664, 0.4, 0.4666666666666667]\nnoise_lvl= 0.5 d= 15 , n= 900 results: [0.3333333333333333, 0.4, 0.4666666666666667, 0.36666666666666664, 0.3]\nnoise_lvl= 0.5 d= 20 , n= 100 results: [0.13333333333333333, 0.06666666666666667, 0.1, 0.06666666666666667, 0.06666666666666667]\nnoise_lvl= 0.5 d= 20 , n= 300 results: [0.13333333333333333, 0.16666666666666666, 0.23333333333333334, 0.13333333333333333, 0.0]\nnoise_lvl= 0.5 d= 20 , n= 500 results: [0.06666666666666667, 0.13333333333333333, 0.13333333333333333, 0.13333333333333333, 0.1]\nnoise_lvl= 0.5 d= 20 , n= 700 results: [0.1, 0.23333333333333334, 0.1, 0.2, 0.1]\nnoise_lvl= 0.5 d= 20 , n= 900 results: [0.2, 
0.1, 0.13333333333333333, 0.16666666666666666, 0.06666666666666667]\n"
],
[
"for noise_lvl in [1., 2., 4., 8., 16., 32.]:\n for d in [10, 15,20]:\n for n in range(100, 901, 200):\n results[(noise_lvl,d,n)] = []\n for seed in range(5):\n results[(noise_lvl,d,n)].append(experiment(d=d, n=n, noise_lvls=(noise_lvl/n, noise_lvl/n)))\n\n print(\"noise_lvl=\", noise_lvl, \"d=\",d, \", n=\", n,\" results:\", results[(noise_lvl,d,n)])",
"noise_lvl= 1.0 d= 10 , n= 100 results: [0.26666666666666666, 0.26666666666666666, 0.36666666666666664, 0.16666666666666666, 0.3]\nnoise_lvl= 1.0 d= 10 , n= 300 results: [0.4666666666666667, 0.5666666666666667, 0.43333333333333335, 0.5666666666666667, 0.36666666666666664]\nnoise_lvl= 1.0 d= 10 , n= 500 results: [0.43333333333333335, 0.3333333333333333, 0.6666666666666666, 0.6666666666666666, 0.5]\nnoise_lvl= 1.0 d= 10 , n= 700 results: [0.6333333333333333, 0.6333333333333333, 0.6, 0.6, 0.5333333333333333]\nnoise_lvl= 1.0 d= 10 , n= 900 results: [0.7666666666666667, 0.6333333333333333, 0.7, 0.7333333333333333, 0.8]\nnoise_lvl= 1.0 d= 15 , n= 100 results: [0.06666666666666667, 0.13333333333333333, 0.13333333333333333, 0.06666666666666667, 0.26666666666666666]\nnoise_lvl= 1.0 d= 15 , n= 300 results: [0.3333333333333333, 0.16666666666666666, 0.3333333333333333, 0.2, 0.06666666666666667]\nnoise_lvl= 1.0 d= 15 , n= 500 results: [0.26666666666666666, 0.3, 0.43333333333333335, 0.3333333333333333, 0.26666666666666666]\nnoise_lvl= 1.0 d= 15 , n= 700 results: [0.23333333333333334, 0.43333333333333335, 0.4, 0.36666666666666664, 0.3]\nnoise_lvl= 1.0 d= 15 , n= 900 results: [0.36666666666666664, 0.5, 0.3333333333333333, 0.5, 0.23333333333333334]\nnoise_lvl= 1.0 d= 20 , n= 100 results: [0.03333333333333333, 0.03333333333333333, 0.1, 0.1, 0.23333333333333334]\nnoise_lvl= 1.0 d= 20 , n= 300 results: [0.1, 0.13333333333333333, 0.13333333333333333, 0.16666666666666666, 0.03333333333333333]\nnoise_lvl= 1.0 d= 20 , n= 500 results: [0.1, 0.16666666666666666, 0.16666666666666666, 0.1, 0.23333333333333334]\nnoise_lvl= 1.0 d= 20 , n= 700 results: [0.23333333333333334, 0.1, 0.13333333333333333, 0.3333333333333333, 0.1]\nnoise_lvl= 1.0 d= 20 , n= 900 results: [0.23333333333333334, 0.2, 0.36666666666666664, 0.3, 0.16666666666666666]\nnoise_lvl= 2.0 d= 10 , n= 100 results: [0.4666666666666667, 0.3333333333333333, 0.4, 0.36666666666666664, 0.36666666666666664]\nnoise_lvl= 2.0 d= 10 , n= 300 results: [0.5666666666666667, 0.26666666666666666, 0.6333333333333333, 0.4666666666666667, 0.4666666666666667]\nnoise_lvl= 2.0 d= 10 , n= 500 results: [0.7666666666666667, 0.5333333333333333, 0.5, 0.6666666666666666, 0.7333333333333333]\nnoise_lvl= 2.0 d= 10 , n= 700 results: [0.5666666666666667, 0.6333333333333333, 0.6, 0.7666666666666667, 0.7]\nnoise_lvl= 2.0 d= 10 , n= 900 results: [0.6, 0.5, 0.7333333333333333, 0.7333333333333333, 0.7]\nnoise_lvl= 2.0 d= 15 , n= 100 results: [0.2, 0.16666666666666666, 0.2, 0.16666666666666666, 0.1]\nnoise_lvl= 2.0 d= 15 , n= 300 results: [0.3, 0.26666666666666666, 0.16666666666666666, 0.13333333333333333, 0.26666666666666666]\nnoise_lvl= 2.0 d= 15 , n= 500 results: [0.26666666666666666, 0.13333333333333333, 0.3333333333333333, 0.43333333333333335, 0.3333333333333333]\nnoise_lvl= 2.0 d= 15 , n= 700 results: [0.3333333333333333, 0.3333333333333333, 0.26666666666666666, 0.43333333333333335, 0.36666666666666664]\nnoise_lvl= 2.0 d= 15 , n= 900 results: [0.6, 0.4, 0.5, 0.36666666666666664, 0.43333333333333335]\nnoise_lvl= 2.0 d= 20 , n= 100 results: [0.13333333333333333, 0.1, 0.13333333333333333, 0.1, 0.1]\nnoise_lvl= 2.0 d= 20 , n= 300 results: [0.2, 0.1, 0.13333333333333333, 0.23333333333333334, 0.1]\nnoise_lvl= 2.0 d= 20 , n= 500 results: [0.13333333333333333, 0.06666666666666667, 0.2, 0.13333333333333333, 0.13333333333333333]\nnoise_lvl= 2.0 d= 20 , n= 700 results: [0.13333333333333333, 0.2, 0.1, 0.16666666666666666, 0.06666666666666667]\nnoise_lvl= 2.0 d= 20 , n= 900 results: 
[0.26666666666666666, 0.16666666666666666, 0.26666666666666666, 0.3, 0.23333333333333334]\nnoise_lvl= 4.0 d= 10 , n= 100 results: [0.4666666666666667, 0.13333333333333333, 0.3333333333333333, 0.36666666666666664, 0.23333333333333334]\nnoise_lvl= 4.0 d= 10 , n= 300 results: [0.5, 0.6, 0.5666666666666667, 0.4, 0.6666666666666666]\nnoise_lvl= 4.0 d= 10 , n= 500 results: [0.4, 0.6333333333333333, 0.4666666666666667, 0.5666666666666667, 0.5333333333333333]\nnoise_lvl= 4.0 d= 10 , n= 700 results: [0.5666666666666667, 0.5333333333333333, 0.6, 0.5333333333333333, 0.5333333333333333]\nnoise_lvl= 4.0 d= 10 , n= 900 results: [0.6333333333333333, 0.7333333333333333, 0.8, 0.6333333333333333, 0.7333333333333333]\nnoise_lvl= 4.0 d= 15 , n= 100 results: [0.16666666666666666, 0.1, 0.2, 0.1, 0.06666666666666667]\nnoise_lvl= 4.0 d= 15 , n= 300 results: [0.2, 0.26666666666666666, 0.2, 0.26666666666666666, 0.2]\nnoise_lvl= 4.0 d= 15 , n= 500 results: [0.3, 0.36666666666666664, 0.3, 0.2, 0.3333333333333333]\nnoise_lvl= 4.0 d= 15 , n= 700 results: [0.3333333333333333, 0.36666666666666664, 0.43333333333333335, 0.36666666666666664, 0.26666666666666666]\nnoise_lvl= 4.0 d= 15 , n= 900 results: [0.43333333333333335, 0.4, 0.5, 0.36666666666666664, 0.4]\nnoise_lvl= 4.0 d= 20 , n= 100 results: [0.03333333333333333, 0.1, 0.1, 0.1, 0.06666666666666667]\nnoise_lvl= 4.0 d= 20 , n= 300 results: [0.16666666666666666, 0.2, 0.03333333333333333, 0.16666666666666666, 0.1]\nnoise_lvl= 4.0 d= 20 , n= 500 results: [0.13333333333333333, 0.13333333333333333, 0.13333333333333333, 0.16666666666666666, 0.1]\nnoise_lvl= 4.0 d= 20 , n= 700 results: [0.3, 0.1, 0.2, 0.13333333333333333, 0.1]\nnoise_lvl= 4.0 d= 20 , n= 900 results: [0.16666666666666666, 0.16666666666666666, 0.2, 0.16666666666666666, 0.16666666666666666]\nnoise_lvl= 8.0 d= 10 , n= 100 results: [0.43333333333333335, 0.4666666666666667, 0.3333333333333333, 0.23333333333333334, 0.43333333333333335]\nnoise_lvl= 8.0 d= 10 , n= 300 results: [0.4666666666666667, 0.5666666666666667, 0.5333333333333333, 0.43333333333333335, 0.4]\nnoise_lvl= 8.0 d= 10 , n= 500 results: [0.6333333333333333, 0.7333333333333333, 0.7, 0.5333333333333333, 0.5333333333333333]\nnoise_lvl= 8.0 d= 10 , n= 700 results: [0.5333333333333333, 0.7, 0.6666666666666666, 0.6333333333333333, 0.43333333333333335]\nnoise_lvl= 8.0 d= 10 , n= 900 results: [0.7, 0.7, 0.6666666666666666, 0.7666666666666667, 0.5333333333333333]\nnoise_lvl= 8.0 d= 15 , n= 100 results: [0.2, 0.06666666666666667, 0.13333333333333333, 0.3, 0.13333333333333333]\nnoise_lvl= 8.0 d= 15 , n= 300 results: [0.26666666666666666, 0.3333333333333333, 0.16666666666666666, 0.26666666666666666, 0.3]\nnoise_lvl= 8.0 d= 15 , n= 500 results: [0.2, 0.36666666666666664, 0.3, 0.23333333333333334, 0.26666666666666666]\nnoise_lvl= 8.0 d= 15 , n= 700 results: [0.4, 0.4666666666666667, 0.3333333333333333, 0.3333333333333333, 0.3333333333333333]\nnoise_lvl= 8.0 d= 15 , n= 900 results: [0.3333333333333333, 0.5, 0.5333333333333333, 0.5, 0.4]\nnoise_lvl= 8.0 d= 20 , n= 100 results: [0.13333333333333333, 0.13333333333333333, 0.03333333333333333, 0.06666666666666667, 0.06666666666666667]\nnoise_lvl= 8.0 d= 20 , n= 300 results: [0.06666666666666667, 0.03333333333333333, 0.1, 0.06666666666666667, 0.2]\nnoise_lvl= 8.0 d= 20 , n= 500 results: [0.1, 0.2, 0.2, 0.2, 0.03333333333333333]\nnoise_lvl= 8.0 d= 20 , n= 700 results: [0.13333333333333333, 0.13333333333333333, 0.1, 0.1, 0.26666666666666666]\nnoise_lvl= 8.0 d= 20 , n= 900 results: [0.16666666666666666, 0.2, 0.3, 
0.16666666666666666, 0.13333333333333333]\nnoise_lvl= 16.0 d= 10 , n= 100 results: [0.43333333333333335, 0.4, 0.4, 0.4, 0.4]\nnoise_lvl= 16.0 d= 10 , n= 300 results: [0.7, 0.5666666666666667, 0.6, 0.5666666666666667, 0.43333333333333335]\nnoise_lvl= 16.0 d= 10 , n= 500 results: [0.6666666666666666, 0.6, 0.6, 0.5666666666666667, 0.5666666666666667]\nnoise_lvl= 16.0 d= 10 , n= 700 results: [0.4666666666666667, 0.6333333333333333, 0.5666666666666667, 0.7666666666666667, 0.6333333333333333]\nnoise_lvl= 16.0 d= 10 , n= 900 results: [0.6666666666666666, 0.7, 0.7, 0.6333333333333333, 0.5666666666666667]\nnoise_lvl= 16.0 d= 15 , n= 100 results: [0.2, 0.13333333333333333, 0.3, 0.13333333333333333, 0.3]\nnoise_lvl= 16.0 d= 15 , n= 300 results: [0.4, 0.2, 0.26666666666666666, 0.43333333333333335, 0.06666666666666667]\nnoise_lvl= 16.0 d= 15 , n= 500 results: [0.3333333333333333, 0.43333333333333335, 0.4, 0.23333333333333334, 0.3333333333333333]\nnoise_lvl= 16.0 d= 15 , n= 700 results: [0.36666666666666664, 0.3333333333333333, 0.3, 0.43333333333333335, 0.3333333333333333]\nnoise_lvl= 16.0 d= 15 , n= 900 results: [0.4, 0.4, 0.5333333333333333, 0.36666666666666664, 0.43333333333333335]\n"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
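The two cells above call an `experiment(d, n, noise_lvls)` helper that is defined earlier in the source notebook and is not part of this record. The returned scores are multiples of 1/30, suggesting accuracy over 30 held-out cases. The sketch below is a purely hypothetical reconstruction under that assumption (a 30-class synthetic classification task; every name and the task itself are guesses, not the notebook's actual code):

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

N_CLASSES = 30  # assumption: the reported scores are multiples of 1/30

def experiment(d, n, noise_lvls, seed=None):
    """Hypothetical stand-in: train on n noisy d-dimensional samples drawn
    around 30 class prototypes, return accuracy on one test point per class."""
    rng = np.random.default_rng(seed)
    means = rng.normal(size=(N_CLASSES, d))               # class prototypes
    y_tr = rng.integers(0, N_CLASSES, size=n)
    X_tr = means[y_tr] + rng.normal(scale=noise_lvls[0], size=(n, d))
    y_te = np.arange(N_CLASSES)                           # one case per class
    X_te = means[y_te] + rng.normal(scale=noise_lvls[1], size=(N_CLASSES, d))
    clf = LogisticRegression(max_iter=1000).fit(X_tr, y_tr)
    return clf.score(X_te, y_te)
```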
4a184c3196159573442922e031ff2280ee948aa6
| 66,583 |
ipynb
|
Jupyter Notebook
|
Data/Data Manipulation.ipynb
|
coddyzz/Junction
|
3a2112dcba4e647a8d7fe8aca28198e42f766fe9
|
[
"MIT"
] | 1 |
2019-11-16T10:48:40.000Z
|
2019-11-16T10:48:40.000Z
|
Data/Data Manipulation.ipynb
|
coddyzz/Helsinki-plus
|
3a2112dcba4e647a8d7fe8aca28198e42f766fe9
|
[
"MIT"
] | 1 |
2019-11-16T19:58:51.000Z
|
2019-11-16T19:58:51.000Z
|
Data/Data Manipulation.ipynb
|
coddyzz/Helsinki-plus
|
3a2112dcba4e647a8d7fe8aca28198e42f766fe9
|
[
"MIT"
] | 1 |
2019-11-16T19:38:20.000Z
|
2019-11-16T19:38:20.000Z
| 36.806523 | 126 | 0.27516 |
[
[
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"visit = pd.read_csv(\"visitorCount.csv\",dtype=str)\na = visit.melt( id_vars=['time'])\n# a.to_csv(\"visitorMelt.csv\")",
"_____no_output_____"
],
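The melt call above turns the wide visitor-count table (one column per station) into long format. A tiny self-contained illustration of the same reshaping, with toy column names:

```python
import pandas as pd

wide = pd.DataFrame({
    "time": ["10:00", "10:15"],
    "station_a": [3, 5],
    "station_b": [7, 2],
})

# one row per (time, station) observation instead of one column per station
long_form = wide.melt(id_vars=["time"], var_name="station", value_name="count")
print(long_form)
```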
[
"movement = pd.read_csv(\"movements.csv\")\nmovement = movement.astype('category')\nlen(movement)",
"_____no_output_____"
],
[
"stations = pd.read_csv(\"stations.csv\")\nstations['double_count'] = False\n",
"_____no_output_____"
],
[
"stations[stations['serial']==\"000000007b5207b6\"]",
"_____no_output_____"
],
[
"any(movement[movement['hash']=='013c76b508f0d5d70b060e9f7248771ef4314b90b811f7b0b1734824'].groupby(['time']).size() > 1)",
"_____no_output_____"
],
[
"# df = pd.DataFrame(columns=['Hash',\"Serial\"])\nmat = np.array([])\nfor i in movement.groupby(['hash','serial']).size()[0:70].index:\n try:\n if(i[0] == a[0]):\n mat = np.append(mat,np.array(a))\n mat = np.append(mat,np.array(i))\n except:\n pass\n a = i\n \nmat = mat.reshape(int(len(mat)/2),2)\nmat",
"_____no_output_____"
],
[
"for row in mat:\n if(any(movement[movement['hash']==row[0]].groupby(['time']).size() > 1)):\n a = stations[stations['serial']==row[1]].index[0]\n stations.at[a,'double_count'] = True\n\n# stations.to_csv(\"stations_dbl_count.csv\")",
"_____no_output_____"
],
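The two cells above detect device hashes that appear under more than one station serial by comparing consecutive groupby keys, relying on a bare except for the first iteration. An equivalent and more direct pandas formulation (a sketch using the same column names):

```python
# hashes observed at more than one station serial
per_hash = movement.groupby("hash")["serial"].nunique()
multi_station = per_hash[per_hash > 1].index

# the (hash, serial) pairs for those devices, mirroring the `mat` array
pairs = (movement.loc[movement["hash"].isin(multi_station), ["hash", "serial"]]
                 .drop_duplicates())
```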
[
"hashToAddress = dict(list(zip(stations['serial'].values,stations['address'].values)))\nhashToAddress",
"_____no_output_____"
],
[
"from sklearn.preprocessing import normalize\nweather = pd.read_csv(\"Helsinki_weather_data.csv\", dtype=str)\nweather['Time'] = weather['d'] + \"/\" + weather['m'] + \"/\" + weather['Year'] + \" \"+ weather['Time']\n# normalize(weather[['Cloud amount (1/8)','Pressure (msl) (hPa)','Relative humidity (%)',\n# 'Precipitation intensity (mm/h)','Snow depth (cm)','Air temperature (degC)',\n# 'Dew-point temperature (degC)','Horizontal visibility (m)','Wind direction (deg)',\n# 'Gust speed (m/s)','Wind speed (m/s)']], axis=1).ravel()\n# weather[['Cloud amount (1/8)','Pressure (msl) (hPa)']]\n# weather.to_csv(\"weather_mod.csv\")",
"_____no_output_____"
],
[
"from datetime import datetime\nnew = pd.DataFrame()\nweather['time'] = weather['Time'].apply(lambda x: datetime.strptime(x,'%d/%m/%Y %H:%M'))\nvisit['time'] = visit['time'].apply(lambda x: datetime.strptime(x,'%d/%m/%Y %H:%M'))\n# sum(new['w_time'] == new['v_time'])",
"_____no_output_____"
],
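The element-wise strptime/apply parsing above works, but pandas can do the same conversion vectorized, which is usually much faster on large frames:

```python
# equivalent vectorized parsing with an explicit format string
weather["time"] = pd.to_datetime(weather["Time"], format="%d/%m/%Y %H:%M")
visit["time"] = pd.to_datetime(visit["time"], format="%d/%m/%Y %H:%M")
```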
[
"weather.drop(columns='Time',inplace=True)\nweather.dtypes",
"_____no_output_____"
],
[
"visit.rename(columns=hashToAddress,inplace=True)\nvisit",
"_____no_output_____"
],
[
"output = visit.set_index('time').join(weather.set_index('time'),how=\"left\",rsuffix = \"_\").reset_index()\noutput.to_csv('joined_Visit_Weather_updated.csv')\noutput",
"_____no_output_____"
],
[
"output",
"_____no_output_____"
],
[
"accuracy = pd.DataFrame()\nfor hashs in hashToAddress.keys():\n try:\n a = pd.read_csv(hashs+\"Prediction.csv\")\n a['station'] = hashToAddress[hashs]\n a['Time'] = pd.date_range(start='19/8/2019', end='20/8/2019',freq=\"120s\")\n accuracy = accuracy.append(a)\n# print(a.head())\n except:\n pass\n\n\naccuracy.to_csv(\"accuracyPlot_updated.csv\")\naccuracy",
"_____no_output_____"
],
[
"pd.date_range(start='19/8/2019', end='20/8/2019',freq=\"120s\")",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a185956d1117e00e545567dc90e65112bc60b27
| 174,229 |
ipynb
|
Jupyter Notebook
|
Recommendation_Test.ipynb
|
saikrishnauppaluri/RecommendationSystemPySpark
|
9860e6417dff2709c47e9e3dbcab1d3fca3e8183
|
[
"Apache-2.0"
] | null | null | null |
Recommendation_Test.ipynb
|
saikrishnauppaluri/RecommendationSystemPySpark
|
9860e6417dff2709c47e9e3dbcab1d3fca3e8183
|
[
"Apache-2.0"
] | null | null | null |
Recommendation_Test.ipynb
|
saikrishnauppaluri/RecommendationSystemPySpark
|
9860e6417dff2709c47e9e3dbcab1d3fca3e8183
|
[
"Apache-2.0"
] | null | null | null | 66.779992 | 36,002 | 0.706605 |
[
[
[
"# **Setting up the Environment**\n\nAll the necessary paths for datasets on drive and jdk are passed. \nAlso all the required libraries are installed and imported along with configuration of spark context for future use.\n",
"_____no_output_____"
]
],
[
[
"# Mounting the google drive for easy access of the dataset \nfrom google.colab import drive\ndrive.mount('/content/drive')",
"Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly\n\nEnter your authorization code:\n··········\nMounted at /content/drive\n"
],
[
"# Path for Java\nJAVA_HOME = \"/usr/lib/jvm/java-8-openjdk-amd64\"",
"_____no_output_____"
]
],
[
[
"#**PySpark implementation**",
"_____no_output_____"
]
],
[
[
"!pip install pyspark\n!pip install -U -q PyDrive\n!apt install openjdk-8-jdk-headless -qq\nimport os\nos.environ[\"JAVA_HOME\"] = JAVA_HOME",
"Collecting pyspark\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/8e/b0/bf9020b56492281b9c9d8aae8f44ff51e1bc91b3ef5a884385cb4e389a40/pyspark-3.0.0.tar.gz (204.7MB)\n\u001b[K |████████████████████████████████| 204.7MB 66kB/s \n\u001b[?25hCollecting py4j==0.10.9\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/9e/b6/6a4fb90cd235dc8e265a6a2067f2a2c99f0d91787f06aca4bcf7c23f3f80/py4j-0.10.9-py2.py3-none-any.whl (198kB)\n\u001b[K |████████████████████████████████| 204kB 44.0MB/s \n\u001b[?25hBuilding wheels for collected packages: pyspark\n Building wheel for pyspark (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for pyspark: filename=pyspark-3.0.0-py2.py3-none-any.whl size=205044182 sha256=acf76f71a021f2ee1ba022b4b66230d85396afb07c5365eee278266cd89840de\n Stored in directory: /root/.cache/pip/wheels/57/27/4d/ddacf7143f8d5b76c45c61ee2e43d9f8492fc5a8e78ebd7d37\nSuccessfully built pyspark\nInstalling collected packages: py4j, pyspark\nSuccessfully installed py4j-0.10.9 pyspark-3.0.0\nThe following package was automatically installed and is no longer required:\n libnvidia-common-440\nUse 'apt autoremove' to remove it.\nThe following additional packages will be installed:\n openjdk-8-jre-headless\nSuggested packages:\n openjdk-8-demo openjdk-8-source libnss-mdns fonts-dejavu-extra\n fonts-ipafont-gothic fonts-ipafont-mincho fonts-wqy-microhei\n fonts-wqy-zenhei fonts-indic\nThe following NEW packages will be installed:\n openjdk-8-jdk-headless openjdk-8-jre-headless\n0 upgraded, 2 newly installed, 0 to remove and 33 not upgraded.\nNeed to get 35.8 MB of archives.\nAfter this operation, 140 MB of additional disk space will be used.\nSelecting previously unselected package openjdk-8-jre-headless:amd64.\n(Reading database ... 
144379 files and directories currently installed.)\nPreparing to unpack .../openjdk-8-jre-headless_8u252-b09-1~18.04_amd64.deb ...\nUnpacking openjdk-8-jre-headless:amd64 (8u252-b09-1~18.04) ...\nSelecting previously unselected package openjdk-8-jdk-headless:amd64.\nPreparing to unpack .../openjdk-8-jdk-headless_8u252-b09-1~18.04_amd64.deb ...\nUnpacking openjdk-8-jdk-headless:amd64 (8u252-b09-1~18.04) ...\nSetting up openjdk-8-jre-headless:amd64 (8u252-b09-1~18.04) ...\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/orbd to provide /usr/bin/orbd (orbd) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/servertool to provide /usr/bin/servertool (servertool) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/tnameserv to provide /usr/bin/tnameserv (tnameserv) in auto mode\nSetting up openjdk-8-jdk-headless:amd64 (8u252-b09-1~18.04) ...\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/bin/idlj to provide /usr/bin/idlj (idlj) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/bin/wsimport to provide /usr/bin/wsimport (wsimport) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/bin/jsadebugd to provide /usr/bin/jsadebugd (jsadebugd) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/bin/native2ascii to provide /usr/bin/native2ascii (native2ascii) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/bin/javah to provide /usr/bin/javah (javah) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/bin/clhsdb to provide /usr/bin/clhsdb (clhsdb) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/bin/extcheck to provide /usr/bin/extcheck (extcheck) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/bin/hsdb to provide /usr/bin/hsdb (hsdb) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/bin/schemagen to provide /usr/bin/schemagen (schemagen) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/bin/xjc to provide /usr/bin/xjc (xjc) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/bin/jhat to provide /usr/bin/jhat (jhat) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/bin/wsgen to provide /usr/bin/wsgen (wsgen) in auto mode\n"
],
[
"# Installing and importing the required python libraries\nimport requests\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport time\n%matplotlib inline\n\nimport pyspark\nfrom pyspark.sql import *\nfrom pyspark.sql.types import *\nfrom pyspark.sql.functions import *\nfrom pyspark import SparkContext, SparkConf",
"/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n import pandas.util.testing as tm\n"
],
[
"# import the SparkConfiguration and SparkContext\n# if we wanted to change any configuration settings for this session only we would define them here\nconf = (SparkConf()\n .setMaster(\"local\")\n .setAppName(\"Recommender_System\")\n .set(\"spark.executor.memory\", \"16G\")\n .set(\"spark.driver.memory\", \"16G\")\n .set(\"spark.executor.cores\", \"8\"))\n# create a SparkContext using the above configuration\nsc = SparkContext(conf=conf)\nspark = SparkSession.builder.getOrCreate()",
"_____no_output_____"
],
[
"spark",
"_____no_output_____"
],
[
"# this command shows the current configuration settings\nsc._conf.getAll()",
"_____no_output_____"
]
],
[
[
"# **Loading the Dataset**\n\nLet's read the Movie and TV ratings dataset from the Google drive and do a quick inspection of the dataset",
"_____no_output_____"
]
],
[
[
"# read in the dataset from google drive\nrating = spark.read.load(\"/content/drive/My Drive/Ratings_Movies_and_TV.csv\", \n format=\"csv\", \n inferSchema=\"true\", \n header=\"false\"\n )",
"_____no_output_____"
],
[
"# number of rows in the dataset\nrating.count()",
"_____no_output_____"
],
[
"# Printing the head of dataset \nrating.show()",
"+----------+--------------+---+----------+\n| _c0| _c1|_c2| _c3|\n+----------+--------------+---+----------+\n|0001527665|A3478QRKQDOPQ2|5.0|1362960000|\n|0001527665|A2VHSG6TZHU1OB|5.0|1361145600|\n|0001527665|A23EJWOW1TLENE|5.0|1358380800|\n|0001527665|A1KM9FNEJ8Q171|5.0|1357776000|\n|0001527665|A38LY2SSHVHRYB|4.0|1356480000|\n|0001527665| AHTYUW2H1276L|5.0|1353024000|\n|0001527665|A3M3HCZLXW0YLF|5.0|1342310400|\n|0001527665|A1OMHX76O2NC6V|1.0|1283472000|\n|0001527665|A3OBOZ41IK6O1M|1.0|1273190400|\n|0005089549|A2M1CU2IRZG0K9|5.0|1352419200|\n|0005089549|A1XIXLXK9B4DAJ|5.0|1347321600|\n|0005089549| AFTUJYISOFHY6|5.0|1325203200|\n|0005089549| AEIAQFCWNRUSE|5.0|1301702400|\n|0005089549|A16WO8T4YXGVWP|5.0|1277596800|\n|0005089549| AX7ANRP31Q7YA|5.0|1240272000|\n|0005089549| AIPN1XFK37ZWI|5.0| 933984000|\n|000503860X| A7H20K09VIXXT|4.0|1123891200|\n|000503860X|A2LGI22B6XRZVA|5.0|1123286400|\n|000503860X|A2A4GWAEM3VOW0|5.0|1116374400|\n|000503860X|A226BMXAQAJVOQ|5.0|1115942400|\n+----------+--------------+---+----------+\nonly showing top 20 rows\n\n"
],
[
"# Renaming the columns of the dataset for Easy reference\nratings = rating.select(col(\"_c0\").alias(\"UserId\"), col(\"_c1\").alias(\"MovieId\"),col(\"_c2\").alias(\"Rating\"),col(\"_c3\").alias(\"Timestamp\"))\nratings.show()",
"+----------+--------------+------+----------+\n| UserId| MovieId|Rating| Timestamp|\n+----------+--------------+------+----------+\n|0001527665|A3478QRKQDOPQ2| 5.0|1362960000|\n|0001527665|A2VHSG6TZHU1OB| 5.0|1361145600|\n|0001527665|A23EJWOW1TLENE| 5.0|1358380800|\n|0001527665|A1KM9FNEJ8Q171| 5.0|1357776000|\n|0001527665|A38LY2SSHVHRYB| 4.0|1356480000|\n|0001527665| AHTYUW2H1276L| 5.0|1353024000|\n|0001527665|A3M3HCZLXW0YLF| 5.0|1342310400|\n|0001527665|A1OMHX76O2NC6V| 1.0|1283472000|\n|0001527665|A3OBOZ41IK6O1M| 1.0|1273190400|\n|0005089549|A2M1CU2IRZG0K9| 5.0|1352419200|\n|0005089549|A1XIXLXK9B4DAJ| 5.0|1347321600|\n|0005089549| AFTUJYISOFHY6| 5.0|1325203200|\n|0005089549| AEIAQFCWNRUSE| 5.0|1301702400|\n|0005089549|A16WO8T4YXGVWP| 5.0|1277596800|\n|0005089549| AX7ANRP31Q7YA| 5.0|1240272000|\n|0005089549| AIPN1XFK37ZWI| 5.0| 933984000|\n|000503860X| A7H20K09VIXXT| 4.0|1123891200|\n|000503860X|A2LGI22B6XRZVA| 5.0|1123286400|\n|000503860X|A2A4GWAEM3VOW0| 5.0|1116374400|\n|000503860X|A226BMXAQAJVOQ| 5.0|1115942400|\n+----------+--------------+------+----------+\nonly showing top 20 rows\n\n"
]
],
[
[
"# **Data Exploration**\n\nLet's investigate the data and try to find and make some observations by looking at summary statistics\n ",
"_____no_output_____"
]
],
[
[
"print ('Distinct values of ratings:')\nprint (ratings.select('Rating').distinct().rdd.map(lambda r: r[0]).collect())",
"Distinct values of ratings:\n[1.0, 4.0, 3.0, 2.0, 5.0]\n"
],
[
"tmp1 = ratings.groupBy(\"UserId\").count().select('count').rdd.min()[0]\ntmp2 = ratings.groupBy(\"MovieId\").count().select('count').rdd.min()[0]\nprint ('For the users that rated movies and the movies that were rated:')\nprint ('Minimum number of ratings per User is {}'.format(tmp1))\nprint ('Minimum number of ratings per Movie is {}'.format(tmp2))",
"For the users that rated movies and the movies that were rated:\nMinimum number of ratings per User is 1\nMinimum number of ratings per Movie is 1\n"
],
[
"tmp1 = ratings.groupBy(\"movieId\").count().withColumnRenamed(\"count\", \"rating count\")\\\n.groupBy(\"rating count\").count().orderBy('rating count').first()[1]\n# Or use pandas: tmp1 = sum(ratings.groupBy(\"movieId\").count().toPandas()['count'] == 1)\ntmp2 = ratings.select('movieId').distinct().count()\nprint ('{} out of {} movies are rated by only one user'.format(tmp1, tmp2))",
"2429325 out of 3826085 movies are rated by only one user\n"
],
[
"print (\"Number of users who rated movies:\", ratings.select('UserId').distinct().count())\nprint (\"Number of rated movies:\", ratings.select('MovieId').distinct().count())",
"Number of users who rated movies: 182032\nNumber of rated movies: 3826085\n"
]
],
[
[
"**Summary of Descriptive Statistics**",
"_____no_output_____"
]
],
[
[
"ratings.describe().toPandas()",
"_____no_output_____"
]
],
[
[
"Converting Spark data to well-known Pandas could be done easily with toPandas() method:",
"_____no_output_____"
]
],
[
[
"# To access plotting libraries, we need to first transform our PySpark DataFrame into a Pandas DataFrame\nRatings_pdf = ratings.toPandas() ",
"_____no_output_____"
]
],
[
[
"**Analysis of Rating Distributions**",
"_____no_output_____"
]
],
[
[
"with sns.axes_style('white'):\n g = sns.catplot(\"Rating\",\n data=Ratings_pdf,\n kind=\"count\", aspect=2,color='steelblue')\n ",
"_____no_output_____"
],
[
"Ratings_pdf_mean_counts = pd.DataFrame(Ratings_pdf.groupby('MovieId')['Rating'].mean())\nRatings_pdf_mean_counts['Rating_Counts'] = pd.DataFrame(Ratings_pdf.groupby('MovieId')['Rating'].count())\n\nplt.figure(figsize=(12,10))\nplt.rcParams['patch.force_edgecolor'] = True\nsns.jointplot(x='Rating', y='Rating_Counts', data=Ratings_pdf_mean_counts, alpha=0.4)",
"_____no_output_____"
]
],
[
[
"# **Sampling**\n\nHere I have decided to take a sample of the dataset for recommendation as the entire dataset is quite huge and implementing various ML techniques like model training, hyperparameter tuning etc. is difficult on free/local resources without crashing\n\nI have use stratified sampling to have the best unbiased sample with appropriate size",
"_____no_output_____"
]
],
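sampleBy draws approximately the requested fraction of rows for each key, so a constant fraction of 0.7 gives roughly a uniform 70% sample while preserving each movie's share of ratings in expectation. A compact illustration on a toy frame (names assumed; `spark` is the session created above):

```python
toy = spark.createDataFrame(
    [("a", 1), ("a", 2), ("a", 3), ("b", 4), ("b", 5)],
    ["key", "value"],
)
# roughly 70% of the rows of each key; the seed makes the draw reproducible
toy.stat.sampleBy("key", {"a": 0.7, "b": 0.7}, seed=1234).show()
```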
[
[
"ratings.groupBy(\"MovieId\").count().show()",
"+--------------+-----+\n| MovieId|count|\n+--------------+-----+\n| AVIKFXS6MT2YV| 2|\n|A3826GI7UHI7SZ| 9|\n|A33GM0OUOWK19O| 1|\n|A1687MV0PLK74B| 16|\n| AB64DUL65WO6O| 10|\n|A1FWW47TZ65PNY| 1|\n|A3RJ48YJJ3NOII| 18|\n|A1M0G9T633G1C3| 5|\n|A2QVL8FGY79WWH| 3|\n|A20TI7T43DCSRY| 2|\n|A343A2TZEZ9Y86| 1|\n| A6GMEO3VRY51S| 207|\n|A2DNWSXNZBD204| 23|\n| AZS14W9Q9XCUQ| 8|\n| AX3NVXGCTQ8AN| 1|\n| AYMM8AP7UVA8Y| 1|\n|A2E130DG40UA2L| 17|\n| ASWG8EJHCWPKC| 21|\n|A1UFEP3IOALM8V| 1|\n|A2BUPLL6RIQBRB| 1|\n+--------------+-----+\nonly showing top 20 rows\n\n"
],
[
"fractions = ratings.select(\"MovieId\").distinct().withColumn(\"fraction\", lit(0.7)).rdd.collectAsMap()\nprint(fractions) \nsampled_ratings = ratings.stat.sampleBy(\"MovieId\", fractions, seed=1234)\nsampled_ratings.show()",
"IOPub data rate exceeded.\nThe notebook server will temporarily stop sending output\nto the client in order to avoid crashing it.\nTo change this limit, set the config variable\n`--NotebookApp.iopub_data_rate_limit`.\n\nCurrent values:\nNotebookApp.iopub_data_rate_limit=1000000.0 (bytes/sec)\nNotebookApp.rate_limit_window=3.0 (secs)\n\n"
],
[
"sampled_ratings.count()",
"_____no_output_____"
],
[
"sampled_ratings.groupBy(\"MovieId\").count().show()",
"+--------------+-----+\n| MovieId|count|\n+--------------+-----+\n|A3826GI7UHI7SZ| 7|\n|A33GM0OUOWK19O| 1|\n|A1687MV0PLK74B| 13|\n| AB64DUL65WO6O| 9|\n|A1FWW47TZ65PNY| 1|\n|A1M0G9T633G1C3| 2|\n|A2QVL8FGY79WWH| 2|\n|A20TI7T43DCSRY| 2|\n| A6GMEO3VRY51S| 143|\n| AYMM8AP7UVA8Y| 1|\n| ASWG8EJHCWPKC| 15|\n|A1UFEP3IOALM8V| 1|\n|A39VS8820WCN0G| 7|\n| ANEDXRFDZDL18| 161|\n| A8PL5OAS7IWKR| 21|\n|A1GX7WCR2HBXNH| 11|\n|A3NYF46FNW70VP| 1|\n|A3510KIIRV4KZV| 1|\n|A1KHZXNXZS8EUN| 1|\n|A2DWUZLALBWVM6| 2|\n+--------------+-----+\nonly showing top 20 rows\n\n"
]
],
[
[
"Converting Spark data to well-known Pandas could be done easily with toPandas() method:",
"_____no_output_____"
]
],
[
[
"# To access plotting libraries, we need to first transform our PySpark DataFrame into a Pandas DataFrame\nRating_pdf = sampled_ratings.toPandas() ",
"_____no_output_____"
],
[
"with sns.axes_style('white'):\n ax = sns.violinplot(x=Ratings_pdf[\"Rating\"])",
"_____no_output_____"
],
[
"Rating_pdf_mean_counts = pd.DataFrame(Rating_pdf.groupby('MovieId')['Rating'].mean())\nRating_pdf_mean_counts['Rating_Counts'] = pd.DataFrame(Rating_pdf.groupby('MovieId')['Rating'].count())\n\nplt.figure(figsize=(12,10))\nplt.rcParams['patch.force_edgecolor'] = True\nsns.jointplot(x='Rating', y='Rating_Counts', data=Rating_pdf_mean_counts, alpha=0.4)",
"_____no_output_____"
]
],
[
[
"# **Data Pre-processing**\n\nThe data is processed to have the right set of variables and its types for implementing the ML algorithm, in our case it is Alternating Least Square (ALS) which recommends using Collaborative filtering",
"_____no_output_____"
]
],
[
[
"Ratings = sampled_ratings.select(\"UserId\", \"MovieId\", \"Rating\")",
"_____no_output_____"
],
[
"# inspect the schema of the data frame\nRatings.printSchema()",
"root\n |-- UserId: string (nullable = true)\n |-- MovieId: string (nullable = true)\n |-- Rating: double (nullable = true)\n\n"
],
[
"# The userId and movieId have to be integers or double, and the rating has to be float/double numbers.\nfrom pyspark.ml import Pipeline\nfrom pyspark.ml.feature import StringIndexer\n\nindexers = [StringIndexer(inputCol=column, outputCol=column+\"_index\").fit(Ratings) for column in list(set(Ratings.columns)-set(['Rating'])) ]\n\n\npipeline = Pipeline(stages=indexers)\nRatings_df = pipeline.fit(Ratings).transform(Ratings)\n\nRatings_df.show() ",
"+----------+--------------+------+-------------+------------+\n| UserId| MovieId|Rating|MovieId_index|UserId_index|\n+----------+--------------+------+-------------+------------+\n|0001527665|A23EJWOW1TLENE| 5.0| 1572690.0| 44694.0|\n|0001527665|A1KM9FNEJ8Q171| 5.0| 553740.0| 44694.0|\n|0001527665| AHTYUW2H1276L| 5.0| 2756451.0| 44694.0|\n|0001527665|A3M3HCZLXW0YLF| 5.0| 824414.0| 44694.0|\n|0001527665|A1OMHX76O2NC6V| 1.0| 1350975.0| 44694.0|\n|0001527665|A3OBOZ41IK6O1M| 1.0| 2429282.0| 44694.0|\n|0005089549|A2M1CU2IRZG0K9| 5.0| 368343.0| 39345.0|\n|0005089549|A1XIXLXK9B4DAJ| 5.0| 1485046.0| 39345.0|\n|0005089549| AFTUJYISOFHY6| 5.0| 272548.0| 39345.0|\n|0005089549| AEIAQFCWNRUSE| 5.0| 2706410.0| 39345.0|\n|0005089549|A16WO8T4YXGVWP| 5.0| 197473.0| 39345.0|\n|0005089549| AX7ANRP31Q7YA| 5.0| 2987424.0| 39345.0|\n|0005089549| AIPN1XFK37ZWI| 5.0| 2769622.0| 39345.0|\n|000503860X| A7H20K09VIXXT| 4.0| 874585.0| 36530.0|\n|000503860X|A2LGI22B6XRZVA| 5.0| 1844208.0| 36530.0|\n|000503860X|A2A4GWAEM3VOW0| 5.0| 647867.0| 36530.0|\n|000503860X|A226BMXAQAJVOQ| 5.0| 1554177.0| 36530.0|\n|000503860X| AIL0Y2OUE1680| 5.0| 915540.0| 36530.0|\n|000503860X|A12VPEOEZS1KTC| 5.0| 9387.0| 36530.0|\n|000503860X| ATLZNVLYKP9AZ| 5.0| 22466.0| 36530.0|\n+----------+--------------+------+-------------+------------+\nonly showing top 20 rows\n\n"
],
[
"Ratings_df = Ratings_df.drop('UserId','MovieId')",
"_____no_output_____"
],
[
"# inspect the schema again\nRatings_df.printSchema()",
"root\n |-- Rating: double (nullable = true)\n |-- MovieId_index: double (nullable = false)\n |-- UserId_index: double (nullable = false)\n\n"
]
],
[
[
"**Extracting the stratified sampling file \"sample_ratings\" and save the file in the drive**",
"_____no_output_____"
]
],
[
[
"Ratings_df.coalesce(1).write.csv('/content/drive/My Drive/sample_ratings.csv')\n",
"_____no_output_____"
]
],
[
[
"**Loading the sampled dataset**\n\nLet's read the Movie and TV ratings dataset from the Google drive and do a quick inspection of the dataset\n",
"_____no_output_____"
]
],
[
[
"# read in the dataset from google drive\nratings_df = spark.read.load(\"/content/drive/My Drive/sample_ratings.csv\", \n format=\"csv\", \n inferSchema=\"true\", \n header=\"false\"\n )",
"_____no_output_____"
],
[
"# number of rows in the dataset\nratings_df.count()",
"_____no_output_____"
],
[
"# Printing the head of dataset \nratings_df.show()",
"+---+---------+-------+\n|_c0| _c1| _c2|\n+---+---------+-------+\n|5.0|1572690.0|44694.0|\n|5.0| 553740.0|44694.0|\n|5.0|2756451.0|44694.0|\n|5.0| 824414.0|44694.0|\n|1.0|1350975.0|44694.0|\n|1.0|2429282.0|44694.0|\n|5.0| 368343.0|39345.0|\n|5.0|1485046.0|39345.0|\n|5.0| 272548.0|39345.0|\n|5.0|2706410.0|39345.0|\n|5.0| 197473.0|39345.0|\n|5.0|2987424.0|39345.0|\n|5.0|2769622.0|39345.0|\n|4.0| 874585.0|36530.0|\n|5.0|1844208.0|36530.0|\n|5.0| 647867.0|36530.0|\n|5.0|1554177.0|36530.0|\n|5.0| 915540.0|36530.0|\n|5.0| 9387.0|36530.0|\n|5.0| 22466.0|36530.0|\n+---+---------+-------+\nonly showing top 20 rows\n\n"
],
[
"# Renaming the columns of the dataset for Easy reference\nratings_df = ratings_df.select(col(\"_c0\").alias(\"Rating\"), col(\"_c1\").alias(\"MovieId_index\"),col(\"_c2\").alias(\"UserId_index\"))\nratings_df.show()",
"+------+-------------+------------+\n|Rating|MovieId_index|UserId_index|\n+------+-------------+------------+\n| 5.0| 1572690.0| 44694.0|\n| 5.0| 553740.0| 44694.0|\n| 5.0| 2756451.0| 44694.0|\n| 5.0| 824414.0| 44694.0|\n| 1.0| 1350975.0| 44694.0|\n| 1.0| 2429282.0| 44694.0|\n| 5.0| 368343.0| 39345.0|\n| 5.0| 1485046.0| 39345.0|\n| 5.0| 272548.0| 39345.0|\n| 5.0| 2706410.0| 39345.0|\n| 5.0| 197473.0| 39345.0|\n| 5.0| 2987424.0| 39345.0|\n| 5.0| 2769622.0| 39345.0|\n| 4.0| 874585.0| 36530.0|\n| 5.0| 1844208.0| 36530.0|\n| 5.0| 647867.0| 36530.0|\n| 5.0| 1554177.0| 36530.0|\n| 5.0| 915540.0| 36530.0|\n| 5.0| 9387.0| 36530.0|\n| 5.0| 22466.0| 36530.0|\n+------+-------------+------------+\nonly showing top 20 rows\n\n"
]
],
[
[
"# **Splitting the dataset into Train and Test**\n\nI will split the data into training/testing sets using a 80/20 random splits.\nThis is done to Train the model on train set and finally evaluate the model post predicting on the test set",
"_____no_output_____"
]
],
[
[
"(train, test) = ratings_df.randomSplit([0.8, 0.2])",
"_____no_output_____"
],
[
"print (\"The number of ratings in each set: {}, {}\".format(train.count(), test.count()))",
"The number of ratings in each set: 4910969, 1226252\n"
]
],
[
[
"# **Alternating Least Squares (ALS)**\n\nLet's take a look on parameters, and try to find any improvements.\n\nParameters of ALS Model in PySpark realization are following:\n\n\n\n* **NumBlocks** is the number of blocks the users and items will be partitioned intoin order to parallelize computation.(set to -1 to auto-configure).\n* **rank** is the number of latent factors in the model.\n\n* **maxIter** is the maximum number of iterations to run.\n\n* **regParam** specifies the regularization parameter in ALS.\n* **implicitPrefs** specifies whether to use the explicit feedback ALS variant or one adapted for implicit feedback data (defaults to false which means using explicit feedback).\n* **alpha** is a parameter applicable to the implicit feedback variant of ALS that governs the baseline confidence in preference observations (defaults to 1.0).",
"_____no_output_____"
]
],
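For reference, here is how an explicit-feedback ALS would be configured with the parameters listed above (values purely illustrative); the cell that follows instead sets implicitPrefs=True, which treats the ratings as implicit confidence signals:

```python
from pyspark.ml.recommendation import ALS

als_explicit = ALS(
    rank=25,              # number of latent factors
    maxIter=10,           # ALS sweeps over users/items
    regParam=0.1,         # L2 regularization strength
    implicitPrefs=False,  # predict the ratings themselves
    userCol="UserId_index",
    itemCol="MovieId_index",
    ratingCol="Rating",
    coldStartStrategy="drop",
)
```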
[
[
"# Build the recommendation model using ALS on the training data\n# Note we set cold start strategy to 'drop' to ensure we don't get NaN evaluation metrics\nfrom pyspark.ml.recommendation import ALS\nals = ALS(rank = 25, maxIter=5, regParam=0.01, \n userCol=\"UserId_index\", itemCol=\"MovieId_index\", ratingCol=\"Rating\",\n coldStartStrategy=\"drop\",\n implicitPrefs=True)\n",
"_____no_output_____"
],
[
"# fit the model to the training data\nmodel = als.fit(train) ",
"_____no_output_____"
]
],
[
[
"#**Make predictions on test_data**",
"_____no_output_____"
]
],
[
[
"predictions_als = model.transform(test)\n\n# View the predictions \npredictions_als.show()",
"+------+-------------+------------+-----------+\n|Rating|MovieId_index|UserId_index| prediction|\n+------+-------------+------------+-----------+\n| 5.0| 148.0| 458.0| 0.39505172|\n| 3.0| 148.0| 12322.0|0.039479308|\n| 5.0| 148.0| 155.0| 0.34969345|\n| 3.0| 148.0| 1700.0| 0.28224003|\n| 5.0| 148.0| 1226.0| 0.26553732|\n| 5.0| 148.0| 11677.0| 0.22716017|\n| 5.0| 148.0| 908.0| 0.5086603|\n| 5.0| 148.0| 916.0| 0.14837763|\n| 3.0| 148.0| 3037.0| 0.07306489|\n| 5.0| 148.0| 4363.0|0.108396456|\n| 5.0| 148.0| 7742.0| 0.15486585|\n| 3.0| 148.0| 14032.0|0.049149938|\n| 5.0| 148.0| 246.0| 0.18326698|\n| 4.0| 148.0| 861.0| 0.54574233|\n| 5.0| 148.0| 6350.0| 0.0642322|\n| 5.0| 148.0| 2466.0| 0.23370916|\n| 3.0| 148.0| 1599.0| 0.22250427|\n| 4.0| 148.0| 4515.0| 0.24154097|\n| 2.0| 148.0| 2296.0| 0.17780864|\n| 4.0| 148.0| 1467.0| 0.232636|\n+------+-------------+------------+-----------+\nonly showing top 20 rows\n\n"
]
],
[
[
"#**Evaluate the predictions**\n\nEvaluate the model by computing the RMSE on the test data",
"_____no_output_____"
]
],
[
[
"# Evaluate the model by computing the RMSE on the test data\nfrom pyspark.ml.evaluation import RegressionEvaluator\n\nevaluator = RegressionEvaluator(metricName=\"rmse\", labelCol=\"Rating\",\n predictionCol=\"prediction\")\n\nrmse = evaluator.evaluate(predictions_als)\nprint(\"Root-mean-square error = \" + str(rmse)) ",
"Root-mean-square error = 4.361782907696021\n"
]
],
[
[
"#**Evaluate the predictions**\nEvaluate the model by computing the MAE on the test data",
"_____no_output_____"
]
],
[
[
"# instantiate evaluator, specifying the desired metric \"mae\" and the columns\n# that contain the predictions and the actual values\nevaluator = RegressionEvaluator(metricName=\"mae\", predictionCol=\"prediction\", labelCol=\"Rating\")\n\n# evaluate the output of our model\nmae = evaluator.evaluate(predictions_als)\n\nprint('The Mean Absolute Error is %.3f' % (mae))",
"The Mean Absolute Error is 4.207\n"
]
],
[
[
"In this we have achieved very high RMSE & MAE score \n\n",
"_____no_output_____"
],
[
"#**Parameter Optimization**",
"_____no_output_____"
],
[
"#**TrainValidationSplit**\n\n\nThe parameters we will search over are:\n\n\n* **Rank** - The number of hidden features that we will use to describe the users/movies.\n\n* **RegParam** - The regularization parameter applied to the cost function.\n\n",
"_____no_output_____"
],
[
"**Root Mean Square Error**\n\n",
"_____no_output_____"
]
],
[
[
"from pyspark.ml.tuning import TrainValidationSplit, ParamGridBuilder\nfrom pyspark.ml.recommendation import ALS\nfrom pyspark.ml.evaluation import RegressionEvaluator\n\n#create a new ALS estimator\nals = ALS(userCol=\"UserId_index\", itemCol=\"MovieId_index\", ratingCol=\"Rating\", coldStartStrategy=\"drop\")\n#define a grid for both parameters\nparamGrid = ParamGridBuilder() \\\n .addGrid(als.rank, [5, 10, 15]) \\\n .addGrid(als.regParam, [1, 0.1, 0.01]) \\\n .build()",
"_____no_output_____"
],
[
"# Define evaluator as RMSE\nevaluator = RegressionEvaluator(metricName=\"rmse\", predictionCol=\"prediction\", labelCol=\"Rating\")",
"_____no_output_____"
],
[
"# split the data with a ratio of 80% training, 20% validation\n# define the estimator and evaluator to use to determine the best model\n# also pass in the parameter grid to search over\ntrainValSplit = TrainValidationSplit(estimator = als, estimatorParamMaps=paramGrid, \n evaluator = RegressionEvaluator(metricName=\"rmse\", predictionCol=\"prediction\", labelCol=\"Rating\"), \n trainRatio = 0.8, parallelism = 4)",
"_____no_output_____"
],
[
"# fit the model to the training data\nmodel = trainValSplit.fit(train)",
"_____no_output_____"
],
[
"# retrieve the best model\nbestModel = model.bestModel",
"_____no_output_____"
]
],
[
[
"Unfortunately there is currently no way in spark to see which combination of hyperparameters were used in the best model. We now use the best model to transform the test data and compute predictions & evaluate.",
"_____no_output_____"
],
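A sketch of that introspection, using the objects defined above: TrainValidationSplitModel exposes validationMetrics aligned with the parameter grid, and the fitted ALS model reports its own rank and regParam.

```python
# validation metric for every grid point, in grid order (lower RMSE is better)
for params, metric in zip(paramGrid, model.validationMetrics):
    readable = {p.name: v for p, v in params.items()}
    print(readable, "->", metric)

# parameters of the winning model itself
print("best rank:", bestModel.rank)
print("best regParam:", bestModel._java_obj.parent().getRegParam())
```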
[
"#**Make predictions on test_data**",
"_____no_output_____"
]
],
[
[
"# transform test data using bestModel\npredictions = bestModel.transform(test)\n\n# View the predictions \npredictions.show()",
"+------+-------------+------------+----------+\n|Rating|MovieId_index|UserId_index|prediction|\n+------+-------------+------------+----------+\n| 3.0| 148.0| 12322.0| 3.0732892|\n| 4.0| 148.0| 22903.0| 3.5088274|\n| 5.0| 148.0| 2044.0| 3.3701227|\n| 5.0| 148.0| 1030.0| 3.3683994|\n| 4.0| 148.0| 15159.0| 3.2757866|\n| 2.0| 148.0| 1244.0| 3.1373582|\n| 3.0| 148.0| 3006.0| 3.1929076|\n| 5.0| 148.0| 13529.0| 3.1663463|\n| 5.0| 148.0| 734.0| 3.3845575|\n| 3.0| 148.0| 10877.0| 3.173956|\n| 5.0| 148.0| 27199.0| 3.2234533|\n| 4.0| 148.0| 10458.0| 3.284251|\n| 5.0| 148.0| 3856.0| 3.3899229|\n| 1.0| 148.0| 27947.0| 2.8077927|\n| 1.0| 148.0| 38498.0| 3.2089317|\n| 5.0| 148.0| 8589.0| 3.1504912|\n| 3.0| 148.0| 3298.0| 2.987259|\n| 5.0| 148.0| 4897.0| 3.238539|\n| 5.0| 148.0| 3505.0| 3.3055627|\n| 2.0| 148.0| 930.0| 2.2503598|\n+------+-------------+------------+----------+\nonly showing top 20 rows\n\n"
]
],
[
[
"#**Evaluate the predictions**\nEvaluate the model by computing the RMSE on the test data",
"_____no_output_____"
]
],
[
[
"# evaluate the predictions\nrmse = evaluator.evaluate(predictions)\nprint('Root Mean Square Error = ' + str(rmse))",
"Root Mean Square Error = 1.3249204788516387\n"
]
],
[
[
"# Our RMSE score improved really well over our previous RMSE of 4.361782907696021",
"_____no_output_____"
],
[
"#**TrainValidationSplit**\n**Mean Absolute Error**",
"_____no_output_____"
]
],
[
[
"# Define evaluator as MAE\nevaluator = RegressionEvaluator(metricName=\"mae\", predictionCol=\"prediction\", labelCol=\"Rating\")",
"_____no_output_____"
],
[
"# split the data with a ratio of 80% training, 20% validation\n# define the estimator and evaluator to use to determine the best model\n# also pass in the parameter grid to search over\ntrainValSplit_1 = TrainValidationSplit(estimator = als, estimatorParamMaps=paramGrid, \n evaluator = RegressionEvaluator(metricName=\"mae\", predictionCol=\"prediction\", labelCol=\"Rating\"), \n trainRatio = 0.8, parallelism = 4)",
"_____no_output_____"
],
[
"# fit the model to the training data\nmodel_1 = trainValSplit_1.fit(train)",
"_____no_output_____"
],
[
"# retrieve the best model\nbestModel_1 = model_1.bestModel",
"_____no_output_____"
]
],
[
[
"Unfortunately there is currently no way in spark to see which combination of hyperparameters were used in the best model. We now use the best model to transform the test data and compute predictions & evaluate.",
"_____no_output_____"
],
[
"#**Make predictions on test_data**",
"_____no_output_____"
]
],
[
[
"# transform test data using bestModel\npredictions_1 = bestModel_1.transform(test)\n\n# View the predictions \npredictions_1.show()",
"+------+-------------+------------+----------+\n|Rating|MovieId_index|UserId_index|prediction|\n+------+-------------+------------+----------+\n| 5.0| 148.0| 458.0| 4.0985966|\n| 4.0| 148.0| 3124.0| 4.4277363|\n| 5.0| 148.0| 3234.0| 4.64029|\n| 2.0| 148.0| 3725.0| 2.7166846|\n| 5.0| 148.0| 2191.0| 4.212287|\n| 3.0| 148.0| 10877.0| 4.1731853|\n| 1.0| 148.0| 9813.0| 4.169053|\n| 4.0| 148.0| 1467.0| 4.785465|\n| 4.0| 148.0| 3932.0| 4.0899057|\n| 1.0| 148.0| 38504.0| 2.6704352|\n| 3.0| 148.0| 35924.0| 4.5422373|\n| 5.0| 148.0| 545.0| 4.002201|\n| 1.0| 148.0| 27947.0| 2.8855164|\n| 3.0| 148.0| 30656.0| 4.43265|\n| 1.0| 148.0| 38498.0| 3.7581713|\n| 5.0| 148.0| 8589.0| 4.236023|\n| 1.0| 148.0| 6693.0| 3.9375055|\n| 4.0| 148.0| 15711.0| 4.5767956|\n| 3.0| 148.0| 241.0| 3.6477094|\n| 4.0| 148.0| 495.0| 4.2112217|\n+------+-------------+------------+----------+\nonly showing top 20 rows\n\n"
]
],
[
[
"#**Evaluate the predictions**\nEvaluate the model by computing the MAE on the test data",
"_____no_output_____"
]
],
[
[
"# evaluate the predictions\nmae = evaluator.evaluate(predictions_1)\nprint('The Mean Absolute Error is %.3f' % (mae))",
"The Mean Absolute Error is 1.189\n"
]
],
[
[
"Our MAE score improved really well over our previous MAE of 4.027",
"_____no_output_____"
],
[
"#**Cross Validation**\n**Root Mean Square Error**",
"_____no_output_____"
]
],
[
[
"from pyspark.ml.recommendation import ALS\n# Build generic ALS model without hyperparameters\nals = ALS(userCol=\"UserId_index\", itemCol=\"MovieId_index\", ratingCol=\"Rating\", coldStartStrategy=\"drop\")\n",
"_____no_output_____"
],
[
"from pyspark.ml.tuning import ParamGridBuilder, CrossValidator\nfrom pyspark.ml.evaluation import RegressionEvaluator\n\nparam_grid = ParamGridBuilder() \\\n .addGrid(als.rank, [10, 25]) \\\n .addGrid(als.maxIter, [10]) \\\n .addGrid(als.regParam, [.01, .1]) \\\n .build() \n# Define evaluator as RMSE\nevaluator = RegressionEvaluator(metricName=\"rmse\", labelCol=\"Rating\",\n predictionCol=\"prediction\")\n\n# Print length of evaluator\nprint (\"Num models to be tested using param_grid: \", len(param_grid))",
"Num models to be tested using param_grid: 4\n"
],
[
"# Build cross validation step using CrossValidator\ncv = CrossValidator(estimator = als,\nestimatorParamMaps = param_grid,\nevaluator = evaluator,\nnumFolds = 5)\n\n# Run the cv on the training data\ncv_model = cv.fit(train)\n# Extract best combination of values from cross validation\nbest_model = cv_model.bestModel",
"_____no_output_____"
]
],
[
[
"Unfortunately there is currently no way in spark to see which combination of hyperparameters were used in the best model. We now use the best model to transform the test data and compute predictions & evaluate.",
"_____no_output_____"
],
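The analogous introspection for CrossValidator uses avgMetrics, the mean cross-validated score per grid point (a sketch against cv_model and param_grid from the cells above):

```python
for params, metric in zip(param_grid, cv_model.avgMetrics):
    readable = {p.name: v for p, v in params.items()}
    print(readable, "-> mean RMSE:", metric)
```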
[
"#**Make predictions on test_data**",
"_____no_output_____"
]
],
[
[
"# Generate test set predictions and evaluate using RMSE\npredictions_2 = best_model.transform(test)\n\n# View the predictions \npredictions_2.show()",
"+------+-------------+------------+----------+\n|Rating|MovieId_index|UserId_index|prediction|\n+------+-------------+------------+----------+\n| 5.0| 148.0| 918.0| 3.4595466|\n| 4.0| 148.0| 3124.0| 4.0765095|\n| 5.0| 148.0| 2907.0| 4.050404|\n| 5.0| 148.0| 2044.0| 4.590904|\n| 4.0| 148.0| 412.0| 4.3416543|\n| 3.0| 148.0| 76.0| 3.2247398|\n| 4.0| 148.0| 20403.0| 4.263033|\n| 5.0| 148.0| 1148.0| 3.085285|\n| 3.0| 148.0| 3037.0| 3.494959|\n| 4.0| 148.0| 548.0| 4.1852612|\n| 5.0| 148.0| 3502.0| 4.362046|\n| 5.0| 148.0| 4363.0| 3.6964378|\n| 5.0| 148.0| 6350.0| 3.918004|\n| 4.0| 148.0| 15159.0| 3.79642|\n| 5.0| 148.0| 2466.0| 3.5998642|\n| 4.0| 148.0| 2829.0| 3.7341557|\n| 2.0| 148.0| 2296.0| 3.6550403|\n| 4.0| 148.0| 15725.0| 4.1780167|\n| 5.0| 148.0| 308.0| 4.1085505|\n| 4.0| 148.0| 3890.0| 4.143096|\n+------+-------------+------------+----------+\nonly showing top 20 rows\n\n"
]
],
[
[
"#**Evaluate the predictions**\nEvaluate the model by computing the RMSE on the test data",
"_____no_output_____"
]
],
[
[
"rmse = evaluator.evaluate(predictions_2)\n# Print evaluation metrics and model parameters\nprint(\"**Best Model**\")\nprint(\"Root Mean Square Error: {:.3f}\".format(rmse))\nprint(\"RegParam: \"), best_model._java_obj.parent().getRegParam()",
"**Best Model**\nRoot Mean Square Error: 1.562\nRegParam: \n"
]
],
[
[
"Our RMSE score is improved a bit over our previous RMSE",
"_____no_output_____"
],
[
"#**Cross Validation**\n\n**Mean Absolute Error**",
"_____no_output_____"
]
],
[
[
"from pyspark.ml.recommendation import ALS\n# Build generic ALS model without hyperparameters\nals = ALS(userCol=\"UserId_index\", itemCol=\"MovieId_index\", ratingCol=\"Rating\", coldStartStrategy=\"drop\")\n",
"_____no_output_____"
],
[
"from pyspark.ml.tuning import ParamGridBuilder, CrossValidator\nfrom pyspark.ml.evaluation import RegressionEvaluator\n\nparam_grid = ParamGridBuilder() \\\n .addGrid(als.rank, [10, 25]) \\\n .addGrid(als.maxIter, [10]) \\\n .addGrid(als.regParam, [.01, .1]) \\\n .build()\n# Define evaluator as MAE\nevaluator = RegressionEvaluator(metricName=\"mae\", labelCol=\"Rating\",\n predictionCol=\"prediction\")\n\n# Print length of evaluator\nprint (\"Num models to be tested using param_grid: \", len(param_grid))",
"Num models to be tested using param_grid: 4\n"
],
[
"# Build cross validation step using CrossValidator\ncv_1 = CrossValidator(estimator = als,\nestimatorParamMaps = param_grid,\nevaluator = evaluator,\nnumFolds = 5)\n\n# Run the cv on the training data\ncv_model_1 = cv_1.fit(train)\n# Extract best combination of values from cross validation\nbest_model_1 = cv_model_1.bestModel",
"_____no_output_____"
]
],
[
[
"Unfortunately there is currently no way in spark to see which combination of hyperparameters were used in the best model. We now use the best model to transform the test data and compute predictions & evaluate.",
"_____no_output_____"
],
[
"#**Make predictions on test_data**",
"_____no_output_____"
]
],
[
[
"# Generate test set predictions and evaluate using MAE\npredictions_3 = best_model_1.transform(test)\n\n# View the predictions \npredictions_3.show()",
"+------+-------------+------------+----------+\n|Rating|MovieId_index|UserId_index|prediction|\n+------+-------------+------------+----------+\n| 5.0| 148.0| 2572.0| 3.9373004|\n| 5.0| 148.0| 918.0| 3.8579354|\n| 3.0| 148.0| 1675.0| 4.2139797|\n| 5.0| 148.0| 81.0| 3.5686483|\n| 5.0| 148.0| 1226.0| 4.609841|\n| 3.0| 148.0| 1869.0| 4.17163|\n| 5.0| 148.0| 2044.0| 4.6245217|\n| 5.0| 148.0| 1030.0| 4.5167594|\n| 3.0| 148.0| 76.0| 3.432896|\n| 5.0| 148.0| 2754.0| 4.27392|\n| 5.0| 148.0| 11677.0| 4.3751707|\n| 5.0| 148.0| 916.0| 3.806754|\n| 1.0| 148.0| 984.0| 3.302636|\n| 5.0| 148.0| 20578.0| 4.790682|\n| 4.0| 148.0| 548.0| 4.296843|\n| 5.0| 148.0| 3502.0| 4.5318446|\n| 5.0| 148.0| 6350.0| 4.0237274|\n| 3.0| 148.0| 1599.0| 3.8772266|\n| 4.0| 148.0| 15725.0| 3.6876824|\n| 5.0| 148.0| 674.0| 4.0111904|\n+------+-------------+------------+----------+\nonly showing top 20 rows\n\n"
]
],
[
[
"#**Evaluate the predictions**\nEvaluate the model by computing the MAE on the test data",
"_____no_output_____"
]
],
[
[
"mae = evaluator.evaluate(predictions_3)\n# Print evaluation metrics and model parameters\nprint(\"**Best Model**\")\nprint(\"Mean Absolute Error: {:.3f}\".format(mae))\nprint(\"RegParam: \"), best_model_1._java_obj.parent().getRegParam()",
"**Best Model**\nMean Absolute Error: 1.161\nRegParam: \n"
]
],
[
[
"Our MAE score is improved a bit over our previous MAE",
"_____no_output_____"
],
[
"# **Provide top Recommendations to all users**\n\nThe best model converged to, with the use of cross validation is used to provide recommendations for all users ",
"_____no_output_____"
]
],
[
[
"# Finally, using the best model to make recommendations for users\nALS_recommendations = best_model.recommendForAllUsers(numItems = 10)\nALS_recommendations.show(n = 10) ",
"+------------+--------------------+\n|UserId_index| recommendations|\n+------------+--------------------+\n| 148|[[230847, 5.99193...|\n| 463|[[378108, 5.98811...|\n| 471|[[100760, 5.94824...|\n| 496|[[144923, 5.83173...|\n| 833|[[217312, 6.21240...|\n| 1088|[[137656, 5.77013...|\n| 1238|[[217312, 6.09007...|\n| 1342|[[18794, 5.541206...|\n| 1580|[[58204, 5.744831...|\n| 1591|[[173694, 5.76754...|\n+------------+--------------------+\nonly showing top 10 rows\n\n"
]
]
] |
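The recommendations above are expressed as MovieId_index values from the StringIndexer. A sketch of mapping them back to the original ID strings, assuming the fitted `indexers` list from the preprocessing pipeline is still in scope:

```python
from pyspark.sql.functions import explode, col
from pyspark.ml.feature import IndexToString

# one row per (user, recommended item, score)
flat = (ALS_recommendations
        .select("UserId_index", explode("recommendations").alias("rec"))
        .select("UserId_index",
                col("rec.MovieId_index").alias("MovieId_index"),
                col("rec.rating").alias("score")))

# invert the StringIndexer via its stored labels
movie_indexer = next(ix for ix in indexers if ix.getInputCol() == "MovieId")
back = IndexToString(inputCol="MovieId_index", outputCol="MovieId",
                     labels=movie_indexer.labels)
back.transform(flat).show(truncate=False)
```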
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
4a1861f6e454c25d23b78e5753ca50de6d593932
| 1,283 |
ipynb
|
Jupyter Notebook
|
Assignment Day 4-LetsUpgrad_PythonEssentials.ipynb
|
shalinik898/LetsUpgrade-Python-Essentials
|
42586e03cac3f11eae2ad5853610d1255796b270
|
[
"Apache-2.0"
] | null | null | null |
Assignment Day 4-LetsUpgrad_PythonEssentials.ipynb
|
shalinik898/LetsUpgrade-Python-Essentials
|
42586e03cac3f11eae2ad5853610d1255796b270
|
[
"Apache-2.0"
] | null | null | null |
Assignment Day 4-LetsUpgrad_PythonEssentials.ipynb
|
shalinik898/LetsUpgrade-Python-Essentials
|
42586e03cac3f11eae2ad5853610d1255796b270
|
[
"Apache-2.0"
] | null | null | null | 19.149254 | 67 | 0.462977 |
[
[
[
"# QUESTION 1",
"_____no_output_____"
]
],
[
[
"# Program to check Armstrong numbers in a certain interval\n\nlower = 1042000\nupper = 702648265\n\nfor num in range(lower, upper + 1):\n \n\n sum = 0\n\n temp = num\n while temp > 0:\n digit = temp % 10\n sum += digit ** 3\n temp //= 10\n\n if num == sum:\n print('The first armstrong number is ' ,num)\n break",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
]
] |
4a18644f7e124a74fc07b0d407ef42de069a87e1
| 7,094 |
ipynb
|
Jupyter Notebook
|
code/.ipynb_checkpoints/Tf_Idf_Vectorizer-checkpoint.ipynb
|
Leo-Olive/law_learning
|
e34c65e7620ef56274f344ae65279b3132443094
|
[
"Unlicense"
] | 10 |
2020-04-02T00:36:40.000Z
|
2022-02-16T05:22:28.000Z
|
code/.ipynb_checkpoints/Tf_Idf_Vectorizer-checkpoint.ipynb
|
Leo-Olive/law_learning
|
e34c65e7620ef56274f344ae65279b3132443094
|
[
"Unlicense"
] | 2 |
2020-10-02T06:45:01.000Z
|
2020-10-12T03:13:28.000Z
|
code/.ipynb_checkpoints/Tf_Idf_Vectorizer-checkpoint.ipynb
|
Leo-Olive/law_learning
|
e34c65e7620ef56274f344ae65279b3132443094
|
[
"Unlicense"
] | 3 |
2020-02-07T05:19:07.000Z
|
2020-07-24T09:09:21.000Z
| 21.760736 | 608 | 0.533127 |
[
[
[
"<h1>目录<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#因变量重编码\" data-toc-modified-id=\"因变量重编码-1\"><span class=\"toc-item-num\">1 </span>因变量重编码</a></span></li><li><span><a href=\"#Tf-Idf向量化\" data-toc-modified-id=\"Tf-Idf向量化-2\"><span class=\"toc-item-num\">2 </span>Tf-Idf向量化</a></span></li><li><span><a href=\"#Hashing向量化\" data-toc-modified-id=\"Hashing向量化-3\"><span class=\"toc-item-num\">3 </span>Hashing向量化</a></span></li><li><span><a href=\"#结论\" data-toc-modified-id=\"结论-4\"><span class=\"toc-item-num\">4 </span>结论</a></span></li></ul></div>",
"_____no_output_____"
]
],
[
[
"import numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport jieba\nimport re\nfrom useful_tools import * # 导入自编函数",
"Building prefix dict from the default dictionary ...\nLoading model from cache C:\\Users\\xsong\\AppData\\Local\\Temp\\jieba.cache\nLoading model cost 0.930 seconds.\nPrefix dict has been built succesfully.\n"
],
[
"# 验证集和停用词\ntrain = pd.read_csv(\"../data/TrainSet.csv\")",
"_____no_output_____"
]
],
[
[
"# 因变量重编码",
"_____no_output_____"
]
],
[
[
"from sklearn.preprocessing import LabelEncoder\nle = LabelEncoder()\ny = le.fit_transform(train['class'])",
"_____no_output_____"
]
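,
[
"# Sketch (added for clarity): inspect the label mapping produced by\n# LabelEncoder; le.classes_[i] is the class name encoded as integer i.\nlist(enumerate(le.classes_[:5]))",
"_____no_output_____"
]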
],
[
[
"# Tf-Idf向量化",
"_____no_output_____"
]
],
[
[
"cutWords_series = train['content'].apply(lambda x: get_cutword(x)) # 得到的是pandas series\ntfidf = get_vectorize(cutWords_series,vector = 'TfidfVectorizer')",
"_____no_output_____"
],
[
"from sklearn.naive_bayes import MultinomialNB\nMultinomialNB = MultinomialNB() # 朴素贝叶斯\ncross_print_info(MultinomialNB, tfidf, y, cv=10)",
"10 折交叉验证准确率为 0.735\n"
],
[
"from sklearn.naive_bayes import BernoulliNB\nBernoulliNB = BernoulliNB() # 朴素贝叶斯\ncross_print_info(BernoulliNB, tfidf, y, cv=10)",
"10 折交叉验证准确率为 0.729\n"
],
[
"from sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nreg = AdaBoostClassifier(DecisionTreeClassifier()) # AdaBoost\ncross_print_info(reg, tfidf, y, cv=10)",
"10 折交叉验证准确率为 0.617\n"
],
[
"from sklearn.ensemble import RandomForestClassifier\nrf = RandomForestClassifier(n_estimators=50) # 随机森林\ncross_print_info(rf, tfidf, y, cv=10)",
"10 折交叉验证准确率为 0.737\n"
],
[
"import xgboost as xgb\nmodel = xgb.XGBClassifier(n_estimators=100) # XGBoost\ncross_print_info(model, tfidf, y, cv=10)",
"10 折交叉验证准确率为 0.704\n"
]
],
[
[
"基准:rf0.686 xgb0.7",
"_____no_output_____"
]
],
[
[
"from sklearn.svm import SVC\nsvmc = SVC(gamma='auto') # 支持向量机\ncross_print_info(svmc, tfidf, y, cv = 10)",
"_____no_output_____"
]
],
[
[
"# Hashing向量化",
"_____no_output_____"
]
],
[
[
"cutWords_series = train['content'].apply(lambda x: get_cutword(x)) # 得到的是pandas series\nHash = get_vectorize(cutWords_series,vector = 'HashingVectorizer')",
"_____no_output_____"
],
[
"cross_print_info(reg, Hash, y, cv=10) # AdaBoost",
"10 折交叉验证准确率为 0.544\n"
],
[
"cross_print_info(rf, Hash, y, cv=10) # 随机森林",
"10 折交叉验证准确率为 0.69\n"
],
[
"cross_print_info(model, Hash, y, cv=10) # XGBoost",
"10 折交叉验证准确率为 0.633\n"
],
[
"cross_print_info(svmc, Hash, y, cv = 10) # 支持向量机",
"_____no_output_____"
]
],
[
[
"# 结论\nTf-Idf取得了很高的准确率,高过了词频方法。Hashing方法表现较差。",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
4a1885816db87fca5d43a64c7467d4d31c755dcb
| 2,076 |
ipynb
|
Jupyter Notebook
|
jupyter_notebooks/decorators/UnderstandingDecorators.ipynb
|
manual123/Nacho-Jupyter-Notebooks
|
e75523434b1a90313a6b44e32b056f63de8a7135
|
[
"MIT"
] | 2 |
2021-02-13T05:52:05.000Z
|
2022-02-08T09:52:35.000Z
|
jupyter_notebooks/decorators/UnderstandingDecorators.ipynb
|
manual123/Nacho-Jupyter-Notebooks
|
e75523434b1a90313a6b44e32b056f63de8a7135
|
[
"MIT"
] | null | null | null |
jupyter_notebooks/decorators/UnderstandingDecorators.ipynb
|
manual123/Nacho-Jupyter-Notebooks
|
e75523434b1a90313a6b44e32b056f63de8a7135
|
[
"MIT"
] | null | null | null | 21.852632 | 93 | 0.499518 |
[
[
[
"# Understanding Decorators",
"_____no_output_____"
]
],
[
[
"#This is the decorator\ndef print_args(func):\n def inner_func(*args, **kwargs):\n print(args)\n print(kwargs)\n return func(*args, **kwargs) #Call the original function with its arguments.\n return inner_func\n\n@print_args\ndef multiply(num_a, num_b):\n return num_a * num_b\n \nprint(multiply(3, 5))",
"(3, 5)\n{}\n15\n"
],
[
"#This is the decorator\ndef time_greeting(func):\n def wrapped_funcion(*args, **kwargs):\n print(f\"Greeting with timestamp: {datetime.now()} Hello {args[0]}!\")\n print(kwargs)\n return func(*args, **kwargs) #Call the original function with its arguments.\n return wrapped_funcion\n\n@time_greeting\ndef greeting(name):\n return f'Hello, {name}!'\n \nprint(greeting('Daniel'))",
"Greeting with timestamp: 2018-04-07 22:51:01.040504 Hello Daniel!\n{}\nHello, Daniel!\n"
]
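,
[
"# Extra sketch (standard library only): decorators replace the original\n# function, so its metadata is lost unless copied over with functools.wraps.\nfrom functools import wraps\n\ndef log_call(func):\n    @wraps(func)  # copies __name__, __doc__, etc. onto the wrapper\n    def wrapper(*args, **kwargs):\n        print(f\"calling {func.__name__}\")\n        return func(*args, **kwargs)\n    return wrapper\n\n@log_call\ndef add(num_a, num_b):\n    return num_a + num_b\n\nprint(add(2, 3))     # prints 'calling add' then 5\nprint(add.__name__)  # 'add' (would be 'wrapper' without @wraps)",
"_____no_output_____"
]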
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
]
] |
4a188aa29429a5d49e57dff77c8401c00d3d152d
| 3,435 |
ipynb
|
Jupyter Notebook
|
Activity_1_Python_Fundamentals.ipynb
|
ja-nana/Linear-Algebra_ChE_2nd-Sem-2021
|
f56e23e22f147b317e85340f0dc6eea76fbbf64b
|
[
"Apache-2.0"
] | null | null | null |
Activity_1_Python_Fundamentals.ipynb
|
ja-nana/Linear-Algebra_ChE_2nd-Sem-2021
|
f56e23e22f147b317e85340f0dc6eea76fbbf64b
|
[
"Apache-2.0"
] | null | null | null |
Activity_1_Python_Fundamentals.ipynb
|
ja-nana/Linear-Algebra_ChE_2nd-Sem-2021
|
f56e23e22f147b317e85340f0dc6eea76fbbf64b
|
[
"Apache-2.0"
] | null | null | null | 22.16129 | 266 | 0.435226 |
[
[
[
"<a href=\"https://colab.research.google.com/github/ja-nana/Linear-Algebra_ChE_2nd-Sem-2021/blob/main/Activity_1_Python_Fundamentals.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"# Welcome to Python Fundamentals\nIn this module, we are going to establish or review our skills in Python programming. In this notebook we are going to cover:\n* Variables and Data Types \n* Operations\n* Input and Output Operations\n* Logic Control\n* Iterables\n* Functions\n",
"_____no_output_____"
],
[
"## Variable and Data Types",
"_____no_output_____"
]
],
[
[
"x = 1\ny,z = 0, -1\ny",
"_____no_output_____"
],
[
"type(x)",
"_____no_output_____"
]
],
[
[
"## Operations\n### Arithmetic\n",
"_____no_output_____"
]
],
[
[
"#Addition\na,b,c,d = 1,2,3,4\n",
"_____no_output_____"
],
[
"S= a+b\nS",
"_____no_output_____"
]
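,
[
"# A quick sketch of the remaining arithmetic operators (added for reference),\n# reusing the variables a, b, c, d defined above\nD = c - a   # subtraction -> 2\nP = b * d   # multiplication -> 8\nQ = d / b   # true division -> 2.0\nF = d // c  # floor division -> 1\nM = d % c   # modulo -> 1\nE = b ** c  # exponentiation -> 8\nprint(D, P, Q, F, M, E)",
"_____no_output_____"
]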
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a188f34badcd85e14ea8f96c62a75419816b12d
| 39,040 |
ipynb
|
Jupyter Notebook
|
intro-to-pytorch/Part 4 - Fashion-MNIST (Exercises).ipynb
|
subham73/deep-learning-v2-pytorch
|
a200ee8d644fa176fc6f2b663cac6cc0207c1b40
|
[
"MIT"
] | null | null | null |
intro-to-pytorch/Part 4 - Fashion-MNIST (Exercises).ipynb
|
subham73/deep-learning-v2-pytorch
|
a200ee8d644fa176fc6f2b663cac6cc0207c1b40
|
[
"MIT"
] | null | null | null |
intro-to-pytorch/Part 4 - Fashion-MNIST (Exercises).ipynb
|
subham73/deep-learning-v2-pytorch
|
a200ee8d644fa176fc6f2b663cac6cc0207c1b40
|
[
"MIT"
] | null | null | null | 147.320755 | 25,432 | 0.880328 |
[
[
[
"# Classifying Fashion-MNIST\n\nNow it's your turn to build and train a neural network. You'll be using the [Fashion-MNIST dataset](https://github.com/zalandoresearch/fashion-mnist), a drop-in replacement for the MNIST dataset. MNIST is actually quite trivial with neural networks where you can easily achieve better than 97% accuracy. Fashion-MNIST is a set of 28x28 greyscale images of clothes. It's more complex than MNIST, so it's a better representation of the actual performance of your network, and a better representation of datasets you'll use in the real world.\n\n<img src='assets/fashion-mnist-sprite.png' width=500px>\n\nIn this notebook, you'll build your own neural network. For the most part, you could just copy and paste the code from Part 3, but you wouldn't be learning. It's important for you to write the code yourself and get it to work. Feel free to consult the previous notebooks though as you work through this.\n\nFirst off, let's load the dataset through torchvision.",
"_____no_output_____"
]
],
[
[
"import os\nos.environ['KMP_DUPLICATE_LIB_OK']='True'",
"_____no_output_____"
],
[
"import torch\nfrom torchvision import datasets, transforms\nimport helper\n\n# Define a transform to normalize the data\ntransform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,))])\n# Download and load the training data\ntrainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)\n\n# Download and load the test data\ntestset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)",
"C:\\Users\\dell\\anaconda3\\envs\\kernel_torch\\lib\\site-packages\\torchvision\\datasets\\mnist.py:498: UserWarning: The given NumPy array is not writeable, and PyTorch does not support non-writeable tensors. This means you can write to the underlying (supposedly non-writeable) NumPy array using the tensor. You may want to copy the array to protect its data or make it writeable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at ..\\torch\\csrc\\utils\\tensor_numpy.cpp:180.)\n return torch.from_numpy(parsed.astype(m[2], copy=False)).view(*s)\n"
]
],
[
[
"Here we can see one of the images.",
"_____no_output_____"
]
],
[
[
"image, label = next(iter(trainloader))\nhelper.imshow(image[0,:]);",
"_____no_output_____"
]
],
[
[
"## Building the network\n\nHere you should define your network. As with MNIST, each image is 28x28 which is a total of 784 pixels, and there are 10 classes. You should include at least one hidden layer. We suggest you use ReLU activations for the layers and to return the logits or log-softmax from the forward pass. It's up to you how many layers you add and the size of those layers.",
"_____no_output_____"
]
],
[
[
"# TODO: Define your network architecture here\nfrom torch import nn\nfrom collections import OrderedDict\nmodel = nn.Sequential(OrderedDict([\n ('fc1',nn.Linear(784, 256)),\n ('relu1',nn.ReLU()),\n ('fc2',nn.Linear(256, 128)),\n ('relu2',nn.ReLU()),\n ('fc3',nn.Linear(128, 64)),\n ('relu3',nn.ReLU()),\n ('fc4',nn.Linear(64, 10)),\n ('lgSoft1',nn.LogSoftmax(dim = 1))\n]))\n",
"_____no_output_____"
]
],
[
[
"# Train the network\n\nNow you should create your network and train it. First you'll want to define [the criterion](http://pytorch.org/docs/master/nn.html#loss-functions) ( something like `nn.CrossEntropyLoss`) and [the optimizer](http://pytorch.org/docs/master/optim.html) (typically `optim.SGD` or `optim.Adam`).\n\nThen write the training code. Remember the training pass is a fairly straightforward process:\n\n* Make a forward pass through the network to get the logits \n* Use the logits to calculate the loss\n* Perform a backward pass through the network with `loss.backward()` to calculate the gradients\n* Take a step with the optimizer to update the weights\n\nBy adjusting the hyperparameters (hidden units, learning rate, etc), you should be able to get the training loss below 0.4.",
"_____no_output_____"
]
],
[
[
"# TODO: Create the network, define the criterion and optimizer\ncriterion = nn.CrossEntropyLoss()\nimport torch.optim as optim\noptimizer = optim.SGD(model.parameters(), lr = 0.01)",
"_____no_output_____"
],
[
"# TODO: Train the network here\nepoch = 10\nfor e in range(epoch):\n running_loss = 0;\n for images, labels in trainloader:\n images = images.view(images.shape[0], -1)#flattening\n \n #resetting optimizer value \n optimizer.zero_grad()\n \n output = model(images)\n loss = criterion(output, labels)\n \n loss.backward()\n optimizer.step()\n running_loss +=loss.item()\n else:\n print(f\"training loss: {running_loss/ len(trainloader)}\")",
"training loss: 0.556966072492508\ntraining loss: 0.49638236839888195\ntraining loss: 0.462184978859511\ntraining loss: 0.4375915310657355\ntraining loss: 0.41876496360309595\ntraining loss: 0.4022402378287651\ntraining loss: 0.3872173265067499\ntraining loss: 0.37480896020304166\ntraining loss: 0.36377009915422276\ntraining loss: 0.35314096156150293\n"
],
[
"%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\nimport torch.nn.functional as F\nimport helper\n\n# Test out your network!\n\ndataiter = iter(testloader)\nimages, labels = dataiter.next()\nimg = images[0]\n# Convert 2D image to 1D vector\nimg = img.resize_(1, 784)\n\n# TODO: Calculate the class probabilities (softmax) for img\nps = torch.exp(model(img))\n\n# Plot the image and probabilities\nhelper.view_classify(img.resize_(1, 28, 28), ps, version='Fashion')",
"_____no_output_____"
]
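,
[
"# Sketch (optional extra): overall accuracy on the test set. Gradients are\n# not needed for evaluation, so the loop is wrapped in torch.no_grad().\ncorrect, total = 0, 0\nwith torch.no_grad():\n    for images, labels in testloader:\n        images = images.view(images.shape[0], -1)\n        preds = model(images).argmax(dim=1)  # model outputs log-probabilities\n        correct += (preds == labels).sum().item()\n        total += labels.size(0)\nprint(f\"Test accuracy: {correct / total:.3f}\")",
"_____no_output_____"
]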
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a1891e2a530aced63e36659d96744917418ec87
| 161,897 |
ipynb
|
Jupyter Notebook
|
ELH_Presentation+Code.ipynb
|
StarlingProjects/Ellons_Little_Helpers
|
e884fbb895716892cccd9579e8b91c6e075fa966
|
[
"Unlicense"
] | null | null | null |
ELH_Presentation+Code.ipynb
|
StarlingProjects/Ellons_Little_Helpers
|
e884fbb895716892cccd9579e8b91c6e075fa966
|
[
"Unlicense"
] | null | null | null |
ELH_Presentation+Code.ipynb
|
StarlingProjects/Ellons_Little_Helpers
|
e884fbb895716892cccd9579e8b91c6e075fa966
|
[
"Unlicense"
] | null | null | null | 117.146889 | 19,076 | 0.849923 |
[
[
[
"# This cell is used to change parameter of the rise slideshow, \n# such as the window width/height and enabling a scroll bar\n\nfrom notebook.services.config import ConfigManager\ncm = ConfigManager()\ncm.update('livereveal', {\n 'width': 1000,\n 'height': 600,\n 'scroll': True,\n})",
"_____no_output_____"
]
],
[
[
"# OGTC Hackathon: Data analysis from Tesla vehicle\n* by: Elon's Little Helpers\n * Chinedu Pascal Ezenkwu\n * Carlos Moreno-Garcia\n * John Guntoro\n * Joseph Sheratt\n * Darren Nicol",
"_____no_output_____"
],
[
"## Problem Setting",
"_____no_output_____"
],
[
"* By reverse-engineering the Tesla API, Intelligent Plant was able to download the data historian of the usage of a Tesla car (three months).",
"_____no_output_____"
],
[
"* $\\approx182$ variables can be obtained.",
"_____no_output_____"
],
[
"* More information of these variable can be found [here](https://tesla-api.timdorr.com) (unofficial).",
"_____no_output_____"
],
[
"* Some information that can be obtained:\n * Screen status\n * Battery charge\n * Odometer data\n * Heating\n * Windshields\n * Fan\n * And much more!",
"_____no_output_____"
],
[
"## Gestalt tool API\n* By querying $*car*$, one can visualise the different variables:",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"* You can plot different variables to see the performance across time:",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"* The spike-down is a moment where no data was recorded.",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"* All data that falls above 0 is considered screen on (i.e. car on)\n* 8 is a mode called \"dog mode\", which monitors an animal inside the car!",
"_____no_output_____"
],
[
"## Data Analysis",
"_____no_output_____"
],
[
"**Installing the necessary packages**",
"_____no_output_____"
]
],
[
[
"%pip install pandas matplotlib",
"_____no_output_____"
]
],
[
[
"**Importing the necessary packages**",
"_____no_output_____"
]
],
[
[
"import intelligent_plant.app_store_client as app_store_client\nimport intelligent_plant.utility as utility\nfrom os.path import expanduser\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom math import *",
"_____no_output_____"
]
],
[
[
"**Getting the data from the repository**",
"_____no_output_____"
]
],
[
[
"app_store = app_store_client.AppStoreClient(open(expanduser(\"~/.access_token\"), \"r\").read())\ndata_core = app_store.get_data_core_client()\napp_store.get_data_core_client()\napp_store.get_user_info()\ndata_core.get_tags(\"FCBB05262EADC0B147746EE6DFB2B3EA5C272C33C2C5E3FE8F473D85529461CA.Edge Historian\")\ntags = {\n \"FCBB05262EADC0B147746EE6DFB2B3EA5C272C33C2C5E3FE8F473D85529461CA.Edge Historian\": [\n \"StevesCar..response.vehicle_state.odometer\",\n \"StevesCar..response.charge_state.battery_level\",\n \"StevesCar..response.climate_state.outside_temp\",\n \"StevesCar..response.vehicle_state.center_display_state\",\n \"StevesCar..response.drive_state.speed\",\n \"StevesCar..response.climate_state.seat_heater_right\"\n ]\n}",
"_____no_output_____"
]
],
[
[
"**Data filtering**\n* All interpolated data from 10 days ago in 3-minute intervals.",
"_____no_output_____"
]
],
[
[
"resp = data_core.get_processed_data(tags, \"*-10d\", \"*\", \"3m\", \"interp\")",
"_____no_output_____"
]
],
[
[
"**Plotting odometer data**",
"_____no_output_____"
]
],
[
[
"df = utility.query_result_to_data_frame(resp)\nplt.plot(df[\"StevesCar..response.vehicle_state.odometer\"])\nplt.xlabel('Data Index')\nplt.ylabel('Odometer miles')\nplt.show()",
"_____no_output_____"
]
],
[
[
"**Creation of a *new* differential dataset** ",
"_____no_output_____"
]
],
[
[
"import pandas as pd\ntype(df.loc[0].TimeStamp)\ndf.loc[1].TimeStamp-df.loc[0].TimeStamp\ndf_new=[]\nfor i in range(1,len(df)-1):\n df_new.append(df.loc[i]-df.loc[i-1])\ndf_new =pd.DataFrame(df_new)",
"_____no_output_____"
]
],
[
[
"* This dataset could be used for time series analysis purposes.",
"_____no_output_____"
],
[
"**Distance travelled**",
"_____no_output_____"
]
],
[
[
"plt.plot(df_new[\"StevesCar..response.vehicle_state.odometer\"])\nplt.ylabel('Distance travelled in time intervaled (miles)')\nplt.xlabel('Data Index')",
"_____no_output_____"
]
],
[
[
"* By plotting the differential of the odometer, we can see the distance travelled (in miles) for every entry.",
"_____no_output_____"
],
[
"**Plotting the differential data on the battery level**",
"_____no_output_____"
]
],
[
[
"plt.plot(df_new[\"StevesCar..response.charge_state.battery_level\"])\nplt.ylabel('Change in Battery % in time interval')\nplt.xlabel('Data Index')",
"_____no_output_____"
]
],
[
[
"* Negative entries occur when battery discharges and vice versa.",
"_____no_output_____"
],
[
"**Plotting efficiency**\n* We created our own efficiency metric as the ratio between battery level differential and odometer differential.",
"_____no_output_____"
]
],
[
[
"df_new['Efficiency'] = df_new[\"StevesCar..response.charge_state.battery_level\"]/df_new[\"StevesCar..response.vehicle_state.odometer\"]\nplt.plot(df_new['Efficiency'], 'bo')\nplt.ylim([-1, 0])\nplt.ylabel('Battery % drop per mile')\nplt.xlabel('Data Index') \nplt.show()",
"_____no_output_____"
]
],
[
[
"* Data seems to be random as for each entry, we obtain a very spread distribution.\n* We only want to see efficiency from 0 to -1, as others would be considered outliers\n * Very huge or very little efficiencies don't make sense, as you cannot have more battery level than actual mileage.",
"_____no_output_____"
],
[
"**Another view of the data**",
"_____no_output_____"
]
],
[
[
"df_new.replace(inf, 0)\ndf_new.replace(-inf, 0)\nplt.plot(df_new['Efficiency'], 'bo')",
"_____no_output_____"
]
],
[
[
"* Here we can see the presence of outliers.",
"_____no_output_____"
],
[
"**Histogram of efficiency metric**",
"_____no_output_____"
]
],
[
[
"eff=np.array(df_new[\"Efficiency\"])\nsh=np.array(df_new[\"StevesCar..response.climate_state.seat_heater_right\"])\n# Limit the efficiency between 0 and -3\neff2= eff[np.where(eff>-3)]\neff3 = eff2[np.where(eff2<0)]\nplt.hist(eff3,bins=20)\nplt.xlabel('Battery % drop per mile')\nplt.ylabel('Number')\nplt.show()",
"/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:7: RuntimeWarning: invalid value encountered in greater\n import sys\n"
]
],
[
[
"**Efficiency vs other variables**\n* Here we compare our efficiency metric against other variables, such as speed or outside temp.\n* Purpose: Try to find correlations.\n* Speed may show some correlation, outside temperature does not.",
"_____no_output_____"
]
],
[
[
"temp = np.array(df[\"StevesCar..response.climate_state.outside_temp\"])[0:-2]\nspeed = np.array(df[\"StevesCar..response.drive_state.speed\"])[0:-2]\n\ntemp2 = temp[np.where(eff>-3)]\ntemp3 = temp2[np.where(eff2<0)]\nspeed2 = speed[np.where(eff>-3)]\nspeed3 = speed2[np.where(eff2<0)]\n\nplt.plot(speed3, eff3, 'bo')\nplt.xlim([0, 60])\nplt.ylabel('Battery % drop per mile')\nplt.xlabel('Speed mph')\nplt.show()\n\nplt.plot(temp3, eff3, 'ro')\nplt.ylabel('Battery % drop per mile')\nplt.xlabel('Outside Temp degC')\nplt.show()",
"/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:4: RuntimeWarning: invalid value encountered in greater\n after removing the cwd from sys.path.\n/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:6: RuntimeWarning: invalid value encountered in greater\n \n"
]
],
[
[
"**Measuring the effect of heating the driver's seat**\n* We were also able to isolate this measurement and compare efficiency w.r.t. right seat heated/unheated.\n* There does not seem to be a change!",
"_____no_output_____"
]
],
[
[
"sh = np.array(df[\"StevesCar..response.climate_state.seat_heater_right\"])[0:-2]\n\neff_without_sh = eff[np.where(sh==0)]\neff_with_sh = eff[np.where(sh>0)]\n\nprint(len(eff_without_sh))\nprint(len(eff_with_sh))\n\neff2_without= eff_without_sh[np.where(eff_without_sh>-2)]\neff3_without = eff2_without[np.where(eff2_without<0)]\n\nspeed_without = speed[np.where(sh==0)]\nspeed2_without = speed_without[np.where(eff_without_sh>-2)]\nspeed3_without = speed2_without[np.where(eff2_without<0)]\n\neff2_with= eff_with_sh[np.where(eff_with_sh>-2)]\neff3_with = eff2_with[np.where(eff2_with<0)]\n\nspeed_with = speed[np.where(sh>0)]\nspeed2_with = speed_with[np.where(eff_with_sh>-2)]\nspeed3_with = speed2_with[np.where(eff2_with<0)]\n\nplt.plot(speed3_without, eff3_without, 'bo', label='Without Seat Heater')\nplt.plot(speed3_with, eff3_with, 'ro',label='With Seat Heater')\nplt.xlim([0, 60])\nplt.xlabel('Speed mph')\nplt.ylabel('Battery % drop per mile')\nplt.legend()\nplt.show()",
"2796\n1922\n"
]
],
[
[
"# If we had more time...",
"_____no_output_____"
],
[
"* Investigate the dataset in-depth",
"_____no_output_____"
],
[
"* Discover new relations",
"_____no_output_____"
],
[
"* Train a regression model to predict efficiency",
"_____no_output_____"
]
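,
[
"# Hedged sketch of the last idea (not run during the hackathon): predict the\n# efficiency metric from speed and outside temperature with a simple linear\n# regression, after dropping NaNs. The variable names reuse the filtered\n# arrays (speed3, temp3, eff3) computed above.\nfrom sklearn.linear_model import LinearRegression\n\nmask = ~(np.isnan(speed3) | np.isnan(temp3) | np.isnan(eff3))\nX = np.column_stack([speed3[mask], temp3[mask]])\ny = eff3[mask]\n\nreg_model = LinearRegression().fit(X, y)\nprint(\"R^2:\", reg_model.score(X, y))\nprint(\"coefficients (speed, temp):\", reg_model.coef_)",
"_____no_output_____"
]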
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a189b05ee7b05584277719d3449827869e9e73f
| 2,718 |
ipynb
|
Jupyter Notebook
|
lesson_notes/Lesson2.ipynb
|
tongxinw/Spark_study
|
c84fbe738e774694de826735fab1e7a08db46bc2
|
[
"Apache-2.0"
] | null | null | null |
lesson_notes/Lesson2.ipynb
|
tongxinw/Spark_study
|
c84fbe738e774694de826735fab1e7a08db46bc2
|
[
"Apache-2.0"
] | null | null | null |
lesson_notes/Lesson2.ipynb
|
tongxinw/Spark_study
|
c84fbe738e774694de826735fab1e7a08db46bc2
|
[
"Apache-2.0"
] | null | null | null | 30.2 | 294 | 0.591244 |
[
[
[
"## Lesson 2",
"_____no_output_____"
],
[
"### I. Four key computer components:\n- CPU, the brain, CPU operation 0.4ns\n- memory (RAM), memory reference 100ns\n- storage, random read from SSD 16us\n- network, data from China to US 150 ms\n\n#### Details\nCPUs are fast, but memory is not. Thus, most time CPU will be IDLE to wait for datas to load.\n\nCPU, RAM, and SSD have improved along time, but network speed is still left behind. This means we nned to aim on reduce moving data from machine to machine using networks. \n\n\n\nIf a dataset is larger than the size of your RAM, you might still be able to analyze the data on a single computer. By default, the Python pandas library will read in an entire dataset from disk into memory. If the dataset is larger than your computer's memory, the program won't work.\n\nHowever, the Python pandas library can read in a file in smaller chunks. Thus, if you were going to calculate summary statistics about the dataset such as a sum or count, you could read in a part of the dataset at a time and accumulate the sum or count.\n\nExample: \n```\nreader = pd.read_csv('tmp.sv', sep='|', chunksize=4)\n\nIn [191]: reader\nOut[191]: <pandas.io.parsers.TextFileReader at 0x7f3d18adb350>\n\nIn [192]: for chunk in reader:\n .....: print(chunk)\n```\n\nThe difference between distributed computing and parallel computing\n",
"_____no_output_____"
],
[
"## Map Reduce\n### Steps in Mapreduce\n- map: get (key, value) pairs\n- shuffle: same key files go to the same CPU/machine/cluster node\n- reduce: calculate\n\n### Spark\nLimitations: high latency in streaming compare to Flink and Storm",
"_____no_output_____"
]
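,
[
"A tiny pure-Python sketch of the three steps (word count); the example data is made up for illustration:\n```\nfrom collections import defaultdict\n\ndocs = [\"spark is fast\", \"spark is fun\"]\n\n# map: emit (key, value) pairs\npairs = [(word, 1) for doc in docs for word in doc.split()]\n\n# shuffle: group values by key (the step that moves data across the network)\ngroups = defaultdict(list)\nfor key, value in pairs:\n    groups[key].append(value)\n\n# reduce: aggregate the values for each key\ncounts = {key: sum(values) for key, values in groups.items()}\nprint(counts)  # {'spark': 2, 'is': 2, 'fast': 1, 'fun': 1}\n```",
"_____no_output_____"
]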
]
] |
[
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown"
]
] |
4a18a2b3d9559f5792c30ade467bb55ff9725913
| 7,727 |
ipynb
|
Jupyter Notebook
|
notebooks/deep_learning_intro/raw/tut2.ipynb
|
GAGAN2608/learntools
|
eaa43e7d10fa7cf8ef09607e00ba8b03a4efb041
|
[
"Apache-2.0"
] | null | null | null |
notebooks/deep_learning_intro/raw/tut2.ipynb
|
GAGAN2608/learntools
|
eaa43e7d10fa7cf8ef09607e00ba8b03a4efb041
|
[
"Apache-2.0"
] | null | null | null |
notebooks/deep_learning_intro/raw/tut2.ipynb
|
GAGAN2608/learntools
|
eaa43e7d10fa7cf8ef09607e00ba8b03a4efb041
|
[
"Apache-2.0"
] | null | null | null | 55.992754 | 828 | 0.658341 |
[
[
[
"# Introduction #\n\nIn this lesson we're going to see how we can build neural networks capable of learning the complex kinds of relationships deep neural nets are famous for.\n\nThe key idea here is *modularity*, building up a complex network from simpler functional units. We've seen how a linear unit computes a linear function -- now we'll see how to combine and modify these single units to model more complex relationships.\n\n# Layers #\n\nNeural networks typically organize their neurons into **layers**. When we collect together linear units having a common set of inputs we get a **dense** layer.\n\n<figure style=\"padding: 1em;\">\n<img src=\"https://i.imgur.com/2MA4iMV.png\" width=\"300\" alt=\"A stack of three circles in an input layer connected to two circles in a dense layer.\">\n<figcaption style=\"textalign: center; font-style: italic\"><center>A dense layer of two linear units receiving two inputs and a bias.\n</center></figcaption>\n</figure>\n\nYou could think of each layer in a neural network as performing some kind of relatively simple transformation. Through a deep stack of layers, a neural network can transform its inputs in more and more complex ways. In a well-trained neural network, each layer is a transformation getting us a little bit closer to a solution.\n\n<blockquote style=\"margin-right:auto; margin-left:auto; background-color: #ebf9ff; padding: 1em; margin:24px;\">\n <strong>Many Kinds of Layers</strong><br>\nA \"layer\" in Keras is a very general kind of thing. A layer can be, essentially, any kind of <em>data transformation</em>. Many layers, like the <a href=\"https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D\">convolutional</a> and <a href=\"https://www.tensorflow.org/api_docs/python/tf/keras/layers/RNN\">recurrent</a> layers, transform data through use of neurons and differ primarily in the pattern of connections they form. Others though are used for <a href=\"https://www.tensorflow.org/api_docs/python/tf/keras/layers/Embedding\">feature engineering</a> or just <a href=\"https://www.tensorflow.org/api_docs/python/tf/keras/layers/Add\">simple arithmetic</a>. There's a whole world of layers to discover -- <a href=\"https://www.tensorflow.org/api_docs/python/tf/keras/layers\">check them out</a>!\n</blockquote>\n\n# The Activation Function #\n\nIt turns out, however, that two dense layers with nothing in between are no better than a single dense layer by itself. Dense layers by themselves can never move us out of the world of lines and planes. What we need is something *nonlinear*. What we need are activation functions.\n\n<figure style=\"padding: 1em;\">\n<img src=\"https://i.imgur.com/OLSUEYT.png\" width=\"400\" alt=\" \">\n<figcaption style=\"textalign: center; font-style: italic\"><center>Without activation functions, neural networks can only learn linear relationships. In order to fit curves, we'll need to use activation functions. \n</center></figcaption>\n</figure>\n\nAn **activation function** is simply some function we apply to each of a layer's outputs (its *activations*). The most common is the *rectifier* function $max(0, x)$.\n\n<figure style=\"padding: 1em;\">\n<img src=\"https://i.imgur.com/aeIyAlF.png\" width=\"400\" alt=\"A graph of the rectifier function. 
The line y=x when x>0 and y=0 when x<0, making a 'hinge' shape like '_/'.\">\n<figcaption style=\"textalign: center; font-style: italic\"><center>\n</center></figcaption>\n</figure>\n\nThe rectifier function has a graph that's a line with the negative part \"rectified\" to zero. Applying the function to the outputs of a neuron will put a *bend* in the data, moving us away from simple lines.\n\nWhen we attach the rectifier to a linear unit, we get a **rectified linear unit** or **ReLU**. (For this reason, it's common to call the rectifier function the \"ReLU function\".) Applying a ReLU activation to a linear unit means the output becomes `max(0, w * x + b)`, which we might draw in a diagram like:\n\n<figure style=\"padding: 1em;\">\n<img src=\"https://i.imgur.com/eFry7Yu.png\" width=\"250\" alt=\"Diagram of a single ReLU. Like a linear unit, but instead of a '+' symbol we now have a hinge '_/'. \">\n<figcaption style=\"textalign: center; font-style: italic\"><center>A rectified linear unit.\n</center></figcaption>\n</figure>",
"_____no_output_____"
],
[
"# Stacking Dense Layers #\n\nNow that we have some nonlinearity, let's see how we can stack layers to get complex data transformations.\n\n<figure style=\"padding: 1em;\">\n<img src=\"https://i.imgur.com/Y5iwFQZ.png\" width=\"450\" alt=\"An input layer, two hidden layers, and a final linear layer.\">\n<figcaption style=\"textalign: center; font-style: italic\"><center>A stack of dense layers makes a \"fully-connected\" network.\n</center></figcaption>\n</figure>\n\nThe layers before the output layer are sometimes called **hidden** since we never see their outputs directly. And though we haven't shown them in this diagram each of these neurons would also be receiving a bias (one bias for each neuron).\n\nNow, notice that the final (output) layer is a linear unit (meaning, no activation function). That makes this network appropriate to a regression task, where we are trying to predict some arbitrary numeric value. Other tasks (like classification) might require an activation function on the output.\n\n## Building Sequential Models ##\n\nThe `Sequential` model we've been using will connect together a list of layers in order from first to last: the first layer gets the input, the last layer produces the output. This creates the model in the figure above:",
"_____no_output_____"
]
],
[
[
"from tensorflow import keras\nfrom tensorflow.keras import layers\n\nmodel = keras.Sequential([\n # the hidden ReLU layers\n layers.Dense(units=4, activation='relu', input_shape=[2]),\n layers.Dense(units=3, activation='relu'),\n # the linear output layer \n layers.Dense(units=1),\n])",
"_____no_output_____"
]
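,
[
"# Optional check (a small sketch, not part of the original lesson): because\n# input_shape was given, Keras can already report each layer's output shape\n# and parameter count.\nmodel.summary()",
"_____no_output_____"
]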
],
[
[
"Be sure to pass all the layers together in a list, like `[layer, layer, layer, ...]`, instead of as separate arguments. To add an activation function to a layer, just give its name in the `activation` argument.\n\n# Your Turn #\n\nNow, [**create a deep neural network**](#$NEXT_NOTEBOOK_URL$) for the *Concrete* dataset.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a18aadd95d9bd41ae43aeb31ee769224c2f15b3
| 6,909 |
ipynb
|
Jupyter Notebook
|
examples/notebook/contrib/traffic_lights.ipynb
|
tias/or-tools
|
b37d9c786b69128f3505f15beca09e89bf078a89
|
[
"Apache-2.0"
] | 1 |
2021-05-25T01:42:03.000Z
|
2021-05-25T01:42:03.000Z
|
examples/notebook/contrib/traffic_lights.ipynb
|
tias/or-tools
|
b37d9c786b69128f3505f15beca09e89bf078a89
|
[
"Apache-2.0"
] | null | null | null |
examples/notebook/contrib/traffic_lights.ipynb
|
tias/or-tools
|
b37d9c786b69128f3505f15beca09e89bf078a89
|
[
"Apache-2.0"
] | 1 |
2021-07-24T22:52:41.000Z
|
2021-07-24T22:52:41.000Z
| 33.867647 | 252 | 0.556231 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a18af0fd1b2b020bfdece2760070cb45f841b6f
| 283,360 |
ipynb
|
Jupyter Notebook
|
notebooks/QC/3_expression_visualization.ipynb
|
talbulus/Synechococcus_elongatus
|
7255bef8a653c704c7173d2ab5e85cb72b0a1816
|
[
"MIT"
] | null | null | null |
notebooks/QC/3_expression_visualization.ipynb
|
talbulus/Synechococcus_elongatus
|
7255bef8a653c704c7173d2ab5e85cb72b0a1816
|
[
"MIT"
] | null | null | null |
notebooks/QC/3_expression_visualization.ipynb
|
talbulus/Synechococcus_elongatus
|
7255bef8a653c704c7173d2ab5e85cb72b0a1816
|
[
"MIT"
] | null | null | null | 275.106796 | 161,564 | 0.914674 |
[
[
[
"<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Load-data\" data-toc-modified-id=\"Load-data-1\"><span class=\"toc-item-num\">1 </span>Load data</a></span></li><li><span><a href=\"#Data-Growth\" data-toc-modified-id=\"Data-Growth-2\"><span class=\"toc-item-num\">2 </span>Data Growth</a></span></li><li><span><a href=\"#Hierarchical-Clustering\" data-toc-modified-id=\"Hierarchical-Clustering-3\"><span class=\"toc-item-num\">3 </span>Hierarchical Clustering</a></span></li><li><span><a href=\"#PCA\" data-toc-modified-id=\"PCA-4\"><span class=\"toc-item-num\">4 </span>PCA</a></span></li><li><span><a href=\"#Normalize-to-reference-conditions\" data-toc-modified-id=\"Normalize-to-reference-conditions-5\"><span class=\"toc-item-num\">5 </span>Normalize to reference conditions</a></span><ul class=\"toc-item\"><li><ul class=\"toc-item\"><li><span><a href=\"#ONLY-FOR-PRECISE-DATA\" data-toc-modified-id=\"ONLY-FOR-PRECISE-DATA-5.0.1\"><span class=\"toc-item-num\">5.0.1 </span>ONLY FOR PRECISE DATA</a></span></li></ul></li></ul></li></ul></div>",
"_____no_output_____"
],
[
"<font size=\"4\">This is a template notebook for exploratory analysis on your organism's QC'ed dataset.</font>",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport os\nfrom os import path\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np",
"_____no_output_____"
],
[
"sns.set_style('ticks')",
"_____no_output_____"
]
],
[
[
"# Load data",
"_____no_output_____"
]
],
[
[
"organism = \"Synechococcus_elongatus\"",
"_____no_output_____"
],
[
"data_dir = path.join('/home/tahani/Documents/elongatus/data/1_iModulon')\nos.listdir(data_dir)",
"_____no_output_____"
],
[
"DF_metadata = pd.read_csv(os.path.join(data_dir,'3_metadata_qc_ICA.csv'),index_col=0)\nDF_log_tpm = pd.read_csv(os.path.join(data_dir,'3_log_tpm_qc_ICA.csv'),index_col=0)\n\nDF_metadata.shape\n",
"_____no_output_____"
],
[
"DF_log_tpm.shape",
"_____no_output_____"
],
[
"DF_metadata['ref_condition'][100]",
"_____no_output_____"
]
],
[
[
"# Data Growth",
"_____no_output_____"
]
],
[
[
"DF_metadata['ReleaseDate'] = pd.to_datetime(DF_metadata['ReleaseDate'])",
"_____no_output_____"
],
[
"local = DF_metadata.loc[[x for x in DF_metadata.index if x.startswith('Paeru')]]",
"_____no_output_____"
],
[
"first_date = min(DF_metadata['ReleaseDate'])\nlast_date = max(DF_metadata['ReleaseDate'])\ndate_range = pd.date_range(start=first_date,end=last_date,freq='YS')",
"_____no_output_____"
],
[
"fig,ax = plt.subplots(figsize=(5,5))\ngrowth = DF_metadata['ReleaseDate'].value_counts().sort_index().cumsum()\ngrowth2 = local['ReleaseDate'].value_counts().sort_index().cumsum()\ngrowth2.loc[pd.Timestamp('2013-02-01 00:00:00')] = 0\ngrowth2.loc[pd.Timestamp('2020-03-01 00:00:00')] = growth2.max()\ngrowth.plot(ax=ax,label='All samples')\ngrowth2.plot(ax=ax,label='PRECISE samples')\nplt.xticks(date_range,date_range.strftime('%Y'),rotation=0,ha='center')\nax.tick_params(labelsize=12)\nax.set_xlabel('Year',fontsize=14)\nax.set_ylabel('Number of Samples',fontsize=14)\nplt.legend(fontsize=12)",
"_____no_output_____"
]
],
[
[
"# Hierarchical Clustering",
"_____no_output_____"
],
[
"<font size=4> A clustermap is a great way to visualize the global correlations between one sample and all others. The following code uses hierarchical clustering to identify specific clusters in the clustermap <font size=4>\n \n<font size=4> To increase the number of clusters, decrease the value of `thresh`. To decrease the number of clusters, increase the value of `thresh` <font size=4>",
"_____no_output_____"
]
],
[
[
"import scipy.cluster.hierarchy as sch\nimport matplotlib.patches as patches\n# change this to get different number of clusters\nthresh = .4\n\n# retrieve clusters using fcluster \ncorr = DF_log_tpm.corr()\ncorr.fillna(0,inplace=True)\ndist = sch.distance.pdist(corr)\nlink = sch.linkage(dist, method='complete')\nclst = pd.DataFrame(index=DF_log_tpm.columns)\nclst['cluster'] = sch.fcluster(link, thresh * dist.max(), 'distance')\n\n#get colors for each cluster\ncm = plt.cm.get_cmap('tab20')\nclr = dict(zip(clst.cluster.unique(), cm.colors))\nclst['color'] = clst.cluster.map(clr)\n\nprint('Number of cluster: ', len(clr))",
"Number of cluster: 10\n"
]
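,
[
"# Sketch (added): inspect how many samples ended up in each cluster\nclst['cluster'].value_counts().sort_index()",
"_____no_output_____"
]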
],
[
[
"<font size=\"4\">To view sample IDs in the clustermap, set `xticklabels` and `yticklabels` to `True`. You can increase the `size` variable to improve readability of sample IDs<font>",
"_____no_output_____"
]
],
[
[
"size = 9\n\nlegend_TN = [patches.Patch(color=c, label=l) for l,c in clr.items()]\n\nsns.set(rc={'figure.facecolor':'white'})\ng = sns.clustermap(DF_log_tpm.corr(), figsize=(size,size), \n row_linkage=link, col_linkage=link, col_colors=clst.color,\n yticklabels=False, xticklabels=False, vmin=0, vmax=1)\n\nl2=g.ax_heatmap.legend(loc='upper left', bbox_to_anchor=(1.01,0.85), handles=legend_TN,frameon=True)\nl2.set_title(title='Clusters',prop={'size':10})",
"_____no_output_____"
]
],
[
[
"# PCA",
"_____no_output_____"
]
],
[
[
"from sklearn.decomposition import PCA\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"<font size=\"4\"> First compute principal components.</font>",
"_____no_output_____"
]
],
[
[
"pca = PCA()\nDF_weights = pd.DataFrame(pca.fit_transform(DF_log_tpm.T),index=DF_log_tpm.columns)\nDF_components = pd.DataFrame(pca.components_.T,index=DF_log_tpm.index)",
"_____no_output_____"
]
],
[
[
"<font size=\"4\"> Next, plot the cumulative explained variance</font>",
"_____no_output_____"
]
],
[
[
"# Set the explained variance threshold\nvar_cutoff = 0.99\n\nfig,ax = plt.subplots(figsize=(5,3.5))\npca_var = np.cumsum(pca.explained_variance_ratio_)\nax.plot(pca_var)\ndims = np.where(pca_var > var_cutoff)[0][0] + 1\nax.vlines(dims,0,1,linestyles='dotted')\nax.hlines(var_cutoff,0,len(DF_log_tpm.columns),linestyles='dotted')\nax.set_ylim(0,1)\nax.set_xlim(0,len(DF_log_tpm.columns))\nax.set_ylabel('Fraction of Explained Variance',fontsize=12)\nax.set_xlabel('Number of Dimensions',fontsize=12)\nax.set_title('Cumulative Explained Variance',fontsize=16)\nprint('Number of dimensions for 99% of variance:',dims)",
"Number of dimensions for 99% of variance: 127\n"
]
],
[
[
"<font size=\"4\">Finally, plot the first two principle components. The following code colors data by Project Name.</font>",
"_____no_output_____"
]
],
[
[
"fig,ax = plt.subplots(figsize=(7,5))\nfor name,group in DF_metadata.groupby('project'):\n idx = DF_log_tpm.loc[:,group.index.tolist()].columns.tolist()\n ax.scatter(DF_weights.loc[idx,0],\n DF_weights.loc[idx,1],\n label=name,alpha=0.8)\nax.set_xlabel('Component 1: %.1f%%'%(pca.explained_variance_ratio_[0]*100),fontsize=14)\nax.set_ylabel('Component 2: %.1f%%'%(pca.explained_variance_ratio_[1]*100),fontsize=14)\nax.set_title('Principal Component Plot',fontsize=18)\nplt.legend(bbox_to_anchor=(1,1),fontsize=12,ncol=2)",
"_____no_output_____"
]
],
[
[
"# Normalize to reference conditions",
"_____no_output_____"
]
],
[
[
"DF_metadata.project",
"_____no_output_____"
],
[
"project_exprs = []\nfor name,group in DF_metadata.groupby('project'):\n ref_cond = group.ref_condition.unique()\n \n # Ensure that there is only one reference condition per project\n assert(len(ref_cond) == 1)\n ref_cond = ref_cond[0]\n \n # Ensure the reference condition is in fact in the project\n assert(ref_cond in group.condition.tolist())\n \n # Get reference condition sample ids\n ref_samples = group[group.condition == ref_cond].index\n \n # Get reference condition expression\n ref_expr = DF_log_tpm[ref_samples].mean(axis=1)\n \n # Subtract reference expression from project\n project_exprs.append(DF_log_tpm[group.index].sub(ref_expr,axis=0))\n\nDF_log_tpm_norm = pd.concat(project_exprs,axis=1)",
"_____no_output_____"
],
[
"DF_log_tpm_norm.head()",
"_____no_output_____"
]
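,
[
"# Quick sanity check (sketch): after subtracting each project's reference\n# expression, the values are log fold-changes relative to the reference and\n# should be centred around 0.\nDF_log_tpm_norm.stack().describe()",
"_____no_output_____"
]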
],
[
[
"Tahani Tuesday Oct 6, 4pm ",
"_____no_output_____"
],
[
"<font size=4>Uncomment this code to save the log_tpm_norm file</font>",
"_____no_output_____"
]
],
[
[
"DF_log_tpm_norm.to_csv(path.join(data_dir,'3_log_tpm_normalized_ICA_second_run.csv'))",
"_____no_output_____"
],
[
"DF_log_tpm_norm.shape",
"_____no_output_____"
]
],
[
[
"### ONLY FOR PRECISE DATA",
"_____no_output_____"
]
],
[
[
"fig,ax = plt.subplots(figsize=(2,3))\nplt.bar(range(5),[-2,-3,-2.6,-20,-19],width=1,linewidth=0,color=['tab:orange']*3+['tab:blue']*2)\nplt.xticks([1,3.5],labels=('Ctrl','Izd'),fontsize=16)\nplt.yticks([])\nplt.ylabel('sigD Activity',fontsize=16)",
"_____no_output_____"
],
[
"DF_metadata = pd.read_csv('/home/anand/Downloads/metadata_all.csv',index_col=0)",
"_____no_output_____"
],
[
"DF_metadata['ReleaseDate'] = pd.to_datetime(DF_metadata['ReleaseDate'])",
"_____no_output_____"
],
[
"local = pd.read_csv('/home/anand/Downloads/local_metadata.csv',index_col=0)\nlocal['ReleaseDate'] = pd.to_datetime(local['ReleaseDate'])",
"_____no_output_____"
],
[
"DF_metadata = pd.concat([DF_metadata,local])",
"_____no_output_____"
],
[
"first_date = min(DF_metadata['ReleaseDate'])\nlast_date = max(DF_metadata['ReleaseDate'])\ndate_range = pd.date_range(start=first_date,end=last_date,freq='YS')",
"_____no_output_____"
],
[
"fig,ax = plt.subplots(figsize=(5,5))\ngrowth = DF_metadata['ReleaseDate'].value_counts().sort_index().cumsum()\ngrowth2 = local['ReleaseDate'].value_counts().sort_index().cumsum()\ngrowth2.loc[pd.Timestamp('2016-05-01 00:00:00')] = 0\n#growth2.loc[pd.Timestamp('2020-03-01 00:00:00')] = growth2.max()\ngrowth.plot(ax=ax,label='All samples')\ngrowth2.plot(ax=ax,label='PRECISE samples')\nplt.xticks(date_range,date_range.strftime('%Y'),rotation=0,ha='center')\nax.tick_params(labelsize=12)\nax.set_xlabel('Year',fontsize=14)\nax.set_ylabel('Number of Samples',fontsize=14)\nplt.legend(fontsize=12)",
"_____no_output_____"
],
[
"len(local)",
"_____no_output_____"
],
[
"DF_metadata['ReleaseDate'] = pd.to_datetime(DF_metadata['ReleaseDate'])",
"_____no_output_____"
],
[
"local = pd.read_csv('/home/anand/Downloads/Abaum_metadata.csv',index_col=0)\nlocal['ReleaseDate'] = pd.to_datetime(local['harvest-date'])",
"_____no_output_____"
],
[
"DF_metadata = pd.concat([DF_metadata,local])",
"_____no_output_____"
],
[
"first_date = min(DF_metadata['ReleaseDate'])\nlast_date = max(DF_metadata['ReleaseDate'])\ndate_range = pd.date_range(start=first_date,end=last_date,freq='YS')",
"_____no_output_____"
],
[
"fig,ax = plt.subplots(figsize=(5,5))\ngrowth = DF_metadata['ReleaseDate'].value_counts().sort_index().cumsum()\ngrowth2 = local['ReleaseDate'].value_counts().sort_index().cumsum()\ngrowth2.loc[pd.Timestamp('2014-06-01 00:00:00')] = 0\ngrowth2.loc[pd.Timestamp('2020-03-01 00:00:00')] = growth2.max()\ngrowth.plot(ax=ax,label='All samples')\ngrowth2.plot(ax=ax,label='PRECISE samples')\nplt.xticks(date_range,date_range.strftime('%Y'),rotation=0,ha='center')\nax.tick_params(labelsize=12)\nax.set_xlabel('Year',fontsize=14)\nax.set_ylabel('Number of Samples',fontsize=14)\nplt.legend(fontsize=12)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a18af8bf0131c2c7408e6c4b852a4a641944351
| 233,206 |
ipynb
|
Jupyter Notebook
|
concepts/handling_timeseries.ipynb
|
DataGeoranger/pastas
|
f6ceb66c7d76e27940257bd1e2d9192b995aed5d
|
[
"MIT"
] | 252 |
2017-01-25T05:48:53.000Z
|
2022-03-31T17:46:37.000Z
|
concepts/handling_timeseries.ipynb
|
DataGeoranger/pastas
|
f6ceb66c7d76e27940257bd1e2d9192b995aed5d
|
[
"MIT"
] | 279 |
2017-02-14T10:59:01.000Z
|
2022-03-31T09:17:37.000Z
|
concepts/handling_timeseries.ipynb
|
DataGeoranger/pastas
|
f6ceb66c7d76e27940257bd1e2d9192b995aed5d
|
[
"MIT"
] | 57 |
2017-02-14T10:26:54.000Z
|
2022-03-11T14:04:48.000Z
| 262.915445 | 121,564 | 0.908073 |
[
[
[
"# Time series in Pastas\n*R.A. Collenteur, University of Graz, 2020*\n\nTime series are at the heart of time series analysis, and therefore need to be considered carefully when dealing with time series models. In this notebook more background information is provided on important characteristics of time series and how these may influence your modeling results. In general, Pastas depends heavily on Pandas for dealing with time series, but adds capabilities to deal with irregular time series and missing data.\n\nAll time series should be provided to Pastas as `pandas.Series` with a `pandas.DatetimeIndex`. Internally these time series are stored in a `pastas.TimeSeries` object. The goal of this object is to validate the user-provided time series and enable resampling (changing frequencies) of the independent time series. The TimeSeries object also has capabilities to deal with missing data in the user-provided time series. As much of these operations occur internally, this notebook is meant to explain users what is happening and how to check for this.\n\n<div class=\"alert alert-info\">\n\n<b>Note</b>\n \n* The standard Pastas data type for a date is the `pandas.Timestamp`.\n* The standard Pastas data type for a sequence of dates is the `pandas.DatetimeIndex` with `pandas.Timestamp`.\n* The standard Pastas data type for a time series is a `pandas.Series` with a `pandas.DatetimeIndex`\n \n</div>",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pastas as ps\n\nps.show_versions()",
"Python version: 3.8.2 (default, Mar 25 2020, 11:22:43) \n[Clang 4.0.1 (tags/RELEASE_401/final)]\nNumpy version: 1.19.2\nScipy version: 1.6.0\nPandas version: 1.2.1\nPastas version: 0.17.0b\nMatplotlib version: 3.3.2\n"
]
],
[
[
"## Different types of time series\nTime series data may generally be defined as a set of data values observed at certain times, ordered in a way that the time indices are increasing. Many time series analysis method assume that the time step between the observations is regular, the time series has evenly-spaced observations. These evenly spaced time series may have missing data, but it will still be possible to lay the values on a time-grid with constant time steps. \n\nThis is generally also assumed to be the case for the independent time series in hydrological studies. For example, the precipitation records may have some missing data but the precipitation is reported as the total rainfall over one day. In the case of missing data, we may impute a zero (no rain) or the rainfall amount from a nearby measurement station.\n\nGroundwater level time series do generally not share these characteristics with other hydrological time series, and are measured at irregular time intervals. This is especially true for historic time series that were measured by hand. The result is that the measurements can not be laid on a regular time grid. The figure below graphically shows the difference between the three types of time series.",
"_____no_output_____"
]
],
[
[
"regular = pd.Series(index=pd.date_range(\"2000-01-01\", \"2000-01-10\", freq=\"D\"),\n data=np.ones(10))\nmissing_data = regular.copy()\nmissing_data.loc[[\"2000-01-03\", \"2000-01-08\"]] = np.nan\n\nindex = [t + pd.Timedelta(np.random.rand()*24, unit=\"H\") for t in missing_data.index]\nirregular = missing_data.copy()\nirregular.index = index\n\nfig, axes = plt.subplots(3,1, figsize=(6, 5), sharex=True, sharey=True)\n\nregular.plot(ax=axes[0], linestyle=\" \", marker=\"o\", x_compat=True)\nmissing_data.plot(ax=axes[1], linestyle=\" \", marker=\"o\", x_compat=True)\nirregular.plot(ax=axes[2], linestyle=\" \", marker=\"o\", x_compat=True)\n\nfor i, name in enumerate([\"(a) Regular time steps\", \"(b) Missing Data\", \"(c) Irregular time steps\"]):\n axes[i].grid()\n axes[i].set_title(name)\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"## Independent and dependent time series \nWe can differentiate between two types of input time series for Pastas models: the dependent and independent time series. The dependent time series are those that we want to explain (e.g., the groundwater levels) and the independent time series are those that we use to explain the dependent time series (e.g., precipitation or evaporation). The requirements for these time series are different:\n\n- The dependent time series may be of any kind: regular, missing data or irregular.\n- The independent time series has to have regular time steps.\n\nIn practice, this means that the time series provided to `pastas.Model` may be of any kind, and that the time series used by the stressmodels (e.g., `pastas.RerchargeModel`) need to have regular time steps. The regular time steps are required to simulate contributions to the groundwater level fluctuations. As there are virtually no restrictions on the dependent time series, the remainder of this notebook will discuss primarily the independent time series.\n\n## How does the TimeSeries object validate a time series?\nTo ensure that a time series can be used for simulation a number of things are checked and changed:\n\n1. Make sure the values are floats. Values are change to dtype=float if not.\n2. Make sure the index is a `pandas.DatetimeIndex`. Index is changed if not.\n3. Make sure the timestamps in the index are increasing. Index is sorted if not.\n4. Make sure there are no nan-values at the start and end of a time series.\n5. Determine the frequency of the time series.\n6. Make sure there are no duplicate indices. Values are averaged if this is the case.\n7. Remove or fill up nan-values, depending on the settings.\n\nFor each of these steps an INFO message will be returned by Pastas to inform the user if a change is made. The first four steps generally do not have a large impact and are there to prevent some basic issues. Preferably, no changes are reported. \n\n### Frequency of the input data\nPastas tries to determine the frequency in step 5, and will **always** report the result. It is generally good practice to double-check if the reported frequency agrees with what you know about the time series. Pastas will also report if no frequency can be inferred. If no frequency is reported there is probably some wrong and the user should fix either fix the input time series or provide Pastas with more information.\n\nBelow we consider a time series with precipitation data, measured every day. We will use `settings=\"prec` as a shortcut for the settings to fill nans and resample. We will come back to those settings later.",
"_____no_output_____"
]
],
[
[
"rain = pd.read_csv('../examples/data/rain_nb1.csv', parse_dates=['date'],\n index_col='date', squeeze=True)\nps.TimeSeries(rain, settings=\"prec\")",
"INFO: Inferred frequency for time series rain: freq=D\n"
]
],
[
[
"Pastas correctly report the frequency and we can continue with this time series. Note that the input time series thus agrees with all the checks for the time series validation. Let's now introduce a nan-value and see what happens.",
"_____no_output_____"
]
],
[
[
"rain[\"1989-01-01\"] = np.nan\nps.TimeSeries(rain, settings=\"prec\")",
"INFO: Inferred frequency for time series rain: freq=D\nINFO: Time Series rain: 1 nan-value(s) was/were found and filled with: 0.0.\n"
]
],
[
[
"This also works fine. The frequency was inferred (stored as freq_original) and one nan-value was filled up with 0.0. Now we take the same time series, but drop the nan-value.",
"_____no_output_____"
]
],
[
[
"ps.TimeSeries(rain.dropna(), settings=\"prec\")",
"INFO: Cannot determine frequency of series rain: freq=None. Resample settings are ignored and timestep_weighted_resample is used.\n"
]
],
[
[
"The above result is probably not what we want. Pastas could not infer the frequency and therefore resorts to the `timestep_weighted_resample` method. Documentation for this method is available in utils.py.\n\nIf we know the original frequency of the time series, we can tell this to Pastas through the `freq_original` argument. As we can see below, the user-provided frequency is used.",
"_____no_output_____"
]
],
[
[
"rain = pd.read_csv('../examples/data/rain_nb1.csv', parse_dates=['date'],\n index_col='date', squeeze=True)\nrain[\"1989-01-01\"] = np.nan\nps.TimeSeries(rain.dropna(), settings=\"prec\", freq_original=\"D\")",
"INFO: User provided frequency for time series rain: freq=D\nINFO: Time Series rain: 1 nan-value(s) was/were found and filled with: 0.0.\n"
]
],
[
[
"The above example shows how to obtain the same or different result with four different methods. Some of these methods requires good knowledge about the TimeSeries object and how it processes your time series. It is often preferred to provide Pastas with a better initial time series by resampling it yourself. This has the additional benefit that you are interacting more closely with the data. Most of the examples also follow this pattern.\n\n<div class=\"alert alert-info\">\n\n<b>Best practice</b>\n\nTry and modify your original time series such that Pastas returns a message that it was able to infer the frequency from the time series itself: **INFO: Inferred frequency for time series rain: freq=D**\n \n</div>",
"_____no_output_____"
],
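[
"Following this best practice, the repair can also be done with plain pandas before the series ever reaches Pastas. The sketch below is our own illustration (filling the gaps of a precipitation series with 0.0 is a modeling choice, not a Pastas requirement):\n\n```python\n# Re-establish a regular daily index ourselves, then hand the\n# repaired series to Pastas so the frequency can be inferred.\nrain_fixed = rain.dropna().asfreq('D', fill_value=0.0)\nps.TimeSeries(rain_fixed, settings='prec')\n```",
"_____no_output_____"
],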
[
"## Time series settings\nIn the examples above we used the `settings` keyword when creating the TimeSeries. This is a shortcut method to select a number of settings from a predefined set of options. These predefined options can accessed through `ps.rcParams[\"timeseries\"]`:",
"_____no_output_____"
]
],
[
[
"pd.DataFrame.from_dict(ps.rcParams[\"timeseries\"])",
"_____no_output_____"
]
],
[
[
"Each column name is a valid option for the `settings` argument. The rows shows the settings that may be chosen for changing the original time series. Once a TimeSeries is created, we can access the existing settings as follows:",
"_____no_output_____"
]
],
[
[
"ts = ps.TimeSeries(rain, settings=\"prec\")\nts.settings",
"INFO: Inferred frequency for time series rain: freq=D\nINFO: Time Series rain: 1 nan-value(s) was/were found and filled with: 0.0.\n"
]
],
[
[
"This settings dictionary now includes both settings used to resample (sample_up, sample_down), extend (fill_before, fill_after), normalize (norm), and fill nans in the time series, but also dynamic settings such as the start and end date (tmin, tmax), the frequency (freq) and the time offset. \n\nTo update these settings you the `update_series` method is available. For example, if we want to resample the above time series to a 7-day frequency and sum up the values we can use:",
"_____no_output_____"
]
],
[
[
"ts.update_series(freq=\"7D\", sample_down=\"sum\")",
"INFO: Time Series rain was sampled down to freq 7D with method sum.\nINFO: Time Series rain was extended to 1980-01-01 00:00:00 with the mean value of the time series.\n"
]
],
[
[
"Because the original series are stored in the TimeSeries object as well, it is also possible to go back again. The changing made to the time series are always started from the original validated time series again. For more information on the possible settings see the API-docs for the [TimeSeries and update_series method](https://pastas.readthedocs.io/en/latest/api/timeseries.html) on the documentation website.",
"_____no_output_____"
],
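[
"As a small sketch (our own addition), the 7-day series from above can be brought back to daily time steps with another call to `update_series`; the change is applied to the stored original series, not to the already resampled one:\n\n```python\n# Pastas starts from the validated original (daily) series again,\n# so no information was lost by the earlier resampling.\nts.update_series(freq='D')\nts.settings['freq']\n```",
"_____no_output_____"
],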
[
"## An example with a Pastas Model\nBy now you may be wondering why all these settings exist in the first place. The main reason (apart from validating the user-provided time series) is to change the time step of the simulation of the independent time series. It may also be used to extend the time series in time.\n\nBelow we load some time series, visualize them and create a Pastas model with precipitation and evaporation to explain the groundwater level fluctuations. It is generally recommended to plot your time series for a quick visual check of the input data.",
"_____no_output_____"
]
],
[
[
"head = pd.read_csv(\"../examples/data/B32C0639001.csv\", parse_dates=['date'], \n index_col='date', squeeze=True) \nrain = pd.read_csv('../examples/data/rain_nb1.csv', parse_dates=['date'],\n index_col='date', squeeze=True)\nevap = pd.read_csv('../examples/data/evap_nb1.csv', parse_dates=['date'],\n index_col='date', squeeze=True)\n\nfig, axes = plt.subplots(3,1, figsize=(10,6), sharex=True)\nhead.plot(ax=axes[0], x_compat=True, linestyle=\" \", marker=\".\")\nevap.plot(ax=axes[1], x_compat=True)\nrain.plot(ax=axes[2], x_compat=True)\naxes[0].set_ylabel(\"Head [m]\")\naxes[1].set_ylabel(\"Evap [mm/d]\")\naxes[2].set_ylabel(\"Rain [mm/d]\")\n\nplt.xlim(\"1985\", \"2005\");",
"_____no_output_____"
],
[
"ml = ps.Model(head)\n\nrch = ps.rch.Linear()\nrm = ps.RechargeModel(rain, evap, recharge=rch, rfunc=ps.Gamma, name=\"rch\")\nml.add_stressmodel(rm)\n\nml.solve(noise=True, tmin=\"1990\", report=\"basic\")",
"INFO: Cannot determine frequency of series head: freq=None. The time series is irregular.\nINFO: Inferred frequency for time series rain: freq=D\nINFO: Inferred frequency for time series evap: freq=D\n"
]
],
[
[
"## What is the model freq?\nThe output below shows that the time series have frequencies of `freq=D`. The fit report also shows a frequency of `freq=D`. The frequency reported in the fit_report is the time step of the simulation for the independent time series, and is internally passed on to the stressmodels. The user-provided dependent time series are stored in the stressmodel object and can be accessed as follows. ",
"_____no_output_____"
]
],
[
[
"ml.stressmodels[\"rch\"].stress",
"_____no_output_____"
]
],
[
[
"If we want to change the resample method, for example we want to sum the precipitation and evaporation when sampling down (e.g., daily to weekly) we may do the following:",
"_____no_output_____"
]
],
[
[
"for stress in ml.stressmodels[\"rch\"].stress:\n stress.update_series(sample_down=\"sum\")",
"_____no_output_____"
]
],
[
[
"After changing the methods for sampling down, we now solve the model with a simulation time step of 14 days. The precipitation and evaporation are then summed up over 14 day intervals, before being translated to a groundwater fluctuation using a respons function.",
"_____no_output_____"
]
],
[
[
"ml.settings",
"_____no_output_____"
],
[
"ml.solve(freq=\"14D\", tmin=\"1980\", report=\"basic\")\nml.plots.results(figsize=(10,6), tmin=\"1970\");",
"INFO: Time Series rain was sampled down to freq 14D with method sum.\nINFO: Time Series rain was extended to 1971-07-01 00:00:00 with the mean value of the time series.\nINFO: Time Series evap was sampled down to freq 14D with method sum.\nINFO: Time Series evap was extended to 1971-07-01 00:00:00 with the mean value of the time series.\nINFO: There are observations between the simulation timesteps. Linear interpolation between simulated values is used.\nINFO: Time Series rain was sampled down to freq 14D with method sum.\nINFO: Time Series rain was extended to 1959-12-31 00:00:00 with the mean value of the time series.\nINFO: Time Series evap was sampled down to freq 14D with method sum.\nINFO: Time Series evap was extended to 1959-12-31 00:00:00 with the mean value of the time series.\n"
],
[
"ml.stressmodels[\"rch\"].stress[1].update_series(tmin=\"1960\")\nml.stressmodels[\"rch\"].stress[1].settings",
"INFO: Time Series evap was sampled down to freq 14D with method sum.\nINFO: Time Series evap was extended to 1960-01-01 00:00:00 with the mean value of the time series.\n"
]
],
[
[
"Another method to obtain the settings of the time series used in a stressmodel is as follows:",
"_____no_output_____"
]
],
[
[
"ml.get_stressmodel_settings(\"rch\")",
"_____no_output_____"
]
],
[
[
"## Warnings\nBecause the TimeSeries object is a relatively complicated object that can potentially change model results extra care has to be taken in some cases. Below is a number of outstanding warnings and the related GitHub issues. \n\n<div class=\"alert alert-warning\">\n\n<b>A note on dependent time series</b>\n\nThe dependent time series (stored as `ml.oseries`) are also stored in a TimeSeries object and therefore have the same capabilities. Usage of these methods on the dependent time series is however experimental and not recommended for real world use cases. See also [Issue #68](https://github.com/pastas/pastas/issues/68) and [Discussion #199](https://github.com/pastas/pastas/discussions/199)\n \n</div>\n\n\n<div class=\"alert alert-warning\">\n\n<b>A note on monthly data</b>\n\nMonthly data is strictly irregular data, and poses additional challenges when resampling to regular frequencies. Pastas does not differentiate between monthly data reported at months end (`freq=M`) or months beginning (`freq=MS`) and the default settings are selected for `freq=M`. There may also be issues with extending the time series. See also [Issue #239](https://github.com/pastas/pastas/issues/239)\n \n</div>",
"_____no_output_____"
]
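,
[
"One hypothetical way to reduce the monthly ambiguity (our own sketch, not an official recommendation) is to normalize the index with pandas before the series enters Pastas, so the timestamps and the frequency are unambiguous:\n\n```python\n# Move month-end stamps to month-start and state the frequency\n# explicitly; whether this is appropriate depends on how the\n# monthly values were measured (sum, mean or snapshot).\nmonthly = monthly_series.copy()  # hypothetical monthly pandas.Series\nmonthly.index = monthly.index.to_period('M').to_timestamp()\nmonthly = monthly.asfreq('MS')\n```",
"_____no_output_____"
]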
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a18b20d8411642a01782f6f11c97e8195566fdd
| 217,287 |
ipynb
|
Jupyter Notebook
|
Pytorch/Barlowtwins.ipynb
|
ashishpatel26/Self-Supervisedd-Learning
|
973129b34fcf8a05ad15871e16b717294005c854
|
[
"MIT"
] | 2 |
2022-01-24T12:38:30.000Z
|
2022-01-25T05:33:46.000Z
|
Pytorch/Barlowtwins.ipynb
|
ashishpatel26/Self-Supervisedd-Learning
|
973129b34fcf8a05ad15871e16b717294005c854
|
[
"MIT"
] | null | null | null |
Pytorch/Barlowtwins.ipynb
|
ashishpatel26/Self-Supervisedd-Learning
|
973129b34fcf8a05ad15871e16b717294005c854
|
[
"MIT"
] | null | null | null | 598.586777 | 198,502 | 0.930488 |
[
[
[
"### Barlow Twins\n* **BarlowTwins** objective function that naturally avoids collapse by measuring the cross-correlation matrix between the outputs of two identical networks fed with distorted versions of a sample, and making it as close to the identity matrix as possible. This causes the embedding vectors of distorted versions of a sample to be similar, while minimizing the redundancy between the components of these vectors. The method is called BARLOW TWINS.\n* This is name coming from neuroscientist H. Barlow’s redundancy-reduction principle applied to a pair of identical networks.\n* This Paper is published by Collobrative with YanLeCun Facebook Team.\n\n",
"_____no_output_____"
]
],
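[
[
"Before relying on the ready-made `BarlowTwinsLoss` from lightly below, it may help to see the objective written out. The following is a simplified sketch of the loss (our own illustration, not the lightly implementation; `lambd=5e-3` is the trade-off weight reported in the paper):\n\n```python\nimport torch\n\ndef barlow_twins_loss_sketch(z_a, z_b, lambd=5e-3):\n    # z_a, z_b: (N, D) embeddings of two distorted views of the same batch\n    N, D = z_a.shape\n    # Normalize each embedding dimension over the batch\n    z_a = (z_a - z_a.mean(0)) / (z_a.std(0) + 1e-9)\n    z_b = (z_b - z_b.mean(0)) / (z_b.std(0) + 1e-9)\n    # Empirical cross-correlation matrix, shape (D, D)\n    c = (z_a.T @ z_b) / N\n    # Invariance term: push the diagonal towards 1\n    on_diag = (torch.diagonal(c) - 1).pow(2).sum()\n    # Redundancy-reduction term: push off-diagonal entries towards 0\n    off_diag = (c - torch.diag(torch.diagonal(c))).pow(2).sum()\n    return on_diag + lambd * off_diag\n```",
"_____no_output_____"
]
],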
[
[
"# !pip install lightly av",
"_____no_output_____"
],
[
"import torch\nfrom torch import nn\nimport torchvision\n\nfrom lightly.data import LightlyDataset\nfrom lightly.data import ImageCollateFunction\nfrom lightly.models.modules import BarlowTwinsProjectionHead\nfrom lightly.loss import BarlowTwinsLoss",
"_____no_output_____"
],
[
"class BarlowTwins(nn.Module):\n def __init__(self, backbone):\n super().__init__()\n self.backbone = backbone\n self.projection_head = BarlowTwinsProjectionHead(512, 2048, 2048)\n\n def forward(self, x):\n x = self.backbone(x).flatten(start_dim=1)\n z = self.projection_head(x)\n return z",
"_____no_output_____"
],
[
"resnet = torchvision.models.resnet18()\nbackbone = nn.Sequential(*list(resnet.children())[:-1])\nmodel = BarlowTwins(backbone)",
"_____no_output_____"
],
[
"device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nmodel.to(device)",
"_____no_output_____"
],
[
"cifar10 = torchvision.datasets.CIFAR10(\"datasets/cifar10\", download=True)\ndataset = LightlyDataset.from_torch_dataset(cifar10)",
"Files already downloaded and verified\n"
],
[
"collate_fn = ImageCollateFunction(input_size=32)",
"_____no_output_____"
],
[
"dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=1024,\n collate_fn=collate_fn,\n shuffle=True,\n drop_last=True,\n num_workers=8,\n)",
"/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:481: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n cpuset_checked))\n"
],
[
"criterion = BarlowTwinsLoss()\noptimizer = torch.optim.SGD(model.parameters(), lr=0.06)",
"_____no_output_____"
],
[
"print(\"Starting Training\")\nfor epoch in range(5):\n total_loss = 0\n for (x0, x1), _, _ in dataloader:\n x0 = x0.to(device)\n x1 = x1.to(device)\n z0 = model(x0)\n z1 = model(x1)\n loss = criterion(z0, z1)\n total_loss += loss.detach()\n loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n avg_loss = total_loss / len(dataloader)\n print(f\"epoch: {epoch:>02}, loss: {avg_loss:.5f}\")",
"Starting Training\n"
]
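,
[
"After pre-training, the backbone is typically reused as a feature extractor (for a linear probe or transfer learning). A short sketch of what that might look like (our own addition, not part of the original notebook):\n\n```python\n# Extract frozen features for one batch with the pre-trained backbone\nmodel.eval()\nwith torch.no_grad():\n    (x0, _), _, _ = next(iter(dataloader))\n    feats = model.backbone(x0.to(device)).flatten(start_dim=1)\nprint(feats.shape)  # (batch_size, 512) for a ResNet-18 backbone\n```",
"_____no_output_____"
]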
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a18c1b045f2a1f8e0410defbc46a62c50b7bc54
| 32,281 |
ipynb
|
Jupyter Notebook
|
jupyter/btwin_draft.ipynb
|
ashishpapanai/bTwin
|
220790017365f3152076010dd250b824d4db31fd
|
[
"BSD-3-Clause"
] | null | null | null |
jupyter/btwin_draft.ipynb
|
ashishpapanai/bTwin
|
220790017365f3152076010dd250b824d4db31fd
|
[
"BSD-3-Clause"
] | null | null | null |
jupyter/btwin_draft.ipynb
|
ashishpapanai/bTwin
|
220790017365f3152076010dd250b824d4db31fd
|
[
"BSD-3-Clause"
] | null | null | null | 43.21419 | 392 | 0.459187 |
[
[
[
"<h3><center><span style=\"font-size: 200%;\">bTwin</span><sup>β</sup> Find your Bollywood Twin </center></h3>\n\nbTwin is an acronym for Bollywood Twin. The idea is to let the user find his celebrity twin by using the technique of computer vision using convolution neural networks (CNNs). \n\nThe current dataset is a collection of pictures of top 100 celebrities of bollywood listed by hungama.com. ",
"_____no_output_____"
],
[
"## 1. Data Collection:\n\nImporting the necessary libraries for data collection from the csv file generated by using the support modules.<br>\nThe <b>Pandas</b> library will help in doing all operations on the data stored in form of a dataframe. <br>\nThe <b>Numpy</b> library is for all linear algebra and mathematical operations.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"The following lines of code restricts the GPU from using complete memory.",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\n\nphysical_devices = tf.config.list_physical_devices('gpu')\nif len(physical_devices) > 0:\n print(\"Using GPU. \")\n tf.config.experimental.set_memory_growth(physical_devices[0], True)\nelse:\n print(\"No GPU found. \")",
"No GPU found. \n"
]
],
[
[
"Importing the csv file which contains the value of each pixel of 100 X 100 pixel grayscale image of each celebrity. ",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('../data.csv')\nprint(df)",
" Label Pixel1 Pixel2 Pixel3 Pixel4 Pixel5 Pixel6 Pixel7 Pixel8 \\\n0 0 40 39 37 27 37 36 50 43 \n1 0 220 199 173 145 132 130 125 126 \n2 0 213 213 211 214 212 215 226 226 \n3 0 88 88 88 88 88 88 88 88 \n4 0 37 42 46 44 37 32 33 37 \n... ... ... ... ... ... ... ... ... ... \n12162 99 9 15 9 11 15 10 46 54 \n12163 99 131 122 128 151 156 153 162 162 \n12164 99 184 187 187 183 183 187 189 188 \n12165 99 248 233 219 185 151 148 154 154 \n12166 99 112 114 115 115 115 117 117 117 \n\n Pixel9 ... Pixel9991 Pixel9992 Pixel9993 Pixel9994 Pixel9995 \\\n0 41 ... 63 66 69 67 64 \n1 122 ... 93 77 75 75 72 \n2 226 ... 50 51 52 52 51 \n3 88 ... 89 105 108 103 106 \n4 43 ... 104 190 180 185 183 \n... ... ... ... ... ... ... ... \n12162 46 ... 57 63 70 62 65 \n12163 153 ... 246 244 243 244 245 \n12164 188 ... 196 197 198 199 199 \n12165 167 ... 14 23 23 19 92 \n12166 120 ... 103 103 102 102 102 \n\n Pixel9996 Pixel9997 Pixel9998 Pixel9999 Pixel10000 \n0 67 71 70 63 60 \n1 84 88 79 86 86 \n2 53 51 51 51 53 \n3 106 105 103 101 98 \n4 182 184 185 186 187 \n... ... ... ... ... ... \n12162 82 108 111 115 119 \n12163 244 244 244 243 243 \n12164 201 201 199 196 192 \n12165 197 177 188 184 190 \n12166 103 103 103 103 103 \n\n[12167 rows x 10001 columns]\n"
]
],
[
[
"The data comprises 12167 images of 100 Bollywood celebrities. The images are grayscaled and are of 100 X 100 pixel resolution, i.e. a total of 10000 pixels.\n<br>\nThe column indexed as Label stores a number/index representing the name of the celebrity based on the following list [in Alphabetical order]: <br>\n\n<details>\n <summary>\n The Label number corresponding to the name of the celebrity <i>[Click the arrow for details]</i>\n </summary>\n1. Aamir_Khan<br>\n2. Abhay_Deol<br>\n3. Abhishek_Bachchan<br>\n4. Aftab_Shivdasani<br>\n5. Aishwarya_Rai<br>\n6. Ajay_Devgn<br>\n7. Akshaye_Khanna<br>\n8. Akshay_Kumar<br>\n9. Alia_Bhatt<br>\n10. Ameesha_Patel<br>\n11. Amitabh_Bachchan<br>\n12. Amrita_Rao<br>\n13. Amy_Jackson<br>\n14. Anil_Kapoor<br>\n15. Anushka_Sharma<br>\n16. Anushka_Shetty<br>\n17. Arjun_Kapoor<br>\n18. Arjun_Rampal<br>\n19. Arshad_Warsi<br>\n20. Asin<br>\n21. Ayushmann_Khurrana<br>\n22. Bhumi_Pednekar<br>\n23. Bipasha_Basu<br>\n24. Bobby_Deol<br>\n25. Deepika_Padukone<br>\n26. Disha_Patani<br>\n27. Emraan_Hashmi<br>\n28. Esha_Gupta<br>\n29. Farhan_Akhtar<br>\n30. Govinda<br>\n31. Hrithik_Roshan<br>\n32. Huma_Qureshi<br>\n33. Ileana_DCruz<br>\n34. Irrfan_Khan<br>\n35. Jacqueline_Fernandez<br>\n36. John_Abraham<br>\n37. Juhi_Chawla<br>\n38. Kajal_Aggarwal<br>\n39. Kajol<br>\n40. Kangana_Ranaut<br>\n41. Kareena_Kapoor<br>\n42. Karisma_Kapoor<br>\n43. Kartik_Aaryan<br>\n44. Katrina_Kaif<br>\n45. Kiara_Advani<br>\n46. Kriti_Kharbanda<br>\n47. Kriti_Sanon<br>\n48. Kunal_Khemu<br>\n49. Lara_Dutta<br>\n50. Madhuri_Dixit<br>\n51. Manoj_Bajpayee<br>\n52. Mrunal_Thakur<br>\n53. Nana_Patekar<br>\n54. Nargis_Fakhri<br>\n55. Naseeruddin_Shah<br>\n56. Nushrat_Bharucha<br>\n57. Paresh_Rawal<br>\n58. Parineeti_Chopra<br>\n59. Pooja_Hegde<br>\n60. Prabhas<br>\n61. Prachi_Desai<br>\n62. Preity_Zinta<br>\n63. Priyanka_Chopra<br>\n64. Rajkummar_Rao<br>\n65. Ranbir_Kapoor<br>\n66. Randeep_Hooda<br>\n67. Rani_Mukerji<br>\n68. Ranveer_Singh<br>\n69. Richa_Chadda<br>\n70. Riteish_Deshmukh<br>\n71. R_Madhavan<br>\n72. Saif_Ali_Khan<br>\n73. Salman_Khan<br>\n74. Sanjay_Dutt<br>\n75. Sara_Ali_Khan<br>\n76. Shahid_Kapoor<br>\n77. Shah_Rukh_Khan<br>\n78. Shilpa_Shetty<br>\n79. Shraddha_Kapoor<br>\n80. Shreyas_Talpade<br>\n81. Shruti_Haasan<br>\n82. Sidharth_Malhotra<br>\n83. Sonakshi_Sinha<br>\n84. Sonam_Kapoor<br>\n85. Suniel_Shetty<br>\n86. Sunny_Deol<br>\n87. Sushant_Singh_Rajput<br>\n88. Taapsee_Pannu<br>\n89. Tabu<br>\n90. Tamannaah_Bhatia<br>\n91. Tiger_Shroff<br>\n92. Tusshar_Kapoor<br>\n93. Uday_Chopra<br>\n94. Vaani_Kapoor<br>\n95. Varun_Dhawan<br>\n96. Vicky_Kaushal<br>\n97. Vidya_Balan<br>\n98. Vivek_Oberoi<br>\n99. Yami_Gautam<br>\n100. Zareen_Khan<br>\n</details>",
"_____no_output_____"
],
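[
"The mapping from the numeric Label back to a celebrity name can be made explicit in code. Note that the dataframe labels run from 0 to 99 while the list above is numbered 1 to 100, so label 0 corresponds to the first name. A minimal sketch (only the first three names are written out here for brevity):\n\n```python\ncelebrities = ['Aamir_Khan', 'Abhay_Deol', 'Abhishek_Bachchan']  # ...all 100 names, in the order above\nlabel_to_name = {i: name for i, name in enumerate(celebrities)}\nprint(label_to_name[0])  # Aamir_Khan\n```",
"_____no_output_____"
],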
[
"### The best practice to prepare any dataset is to split it into 3 parts:\n<u><b>Training Set:</b></u> This set will be used to train the model and develop a relation between the labels and the training data. *[60% of total data]*<br>\n<u><b>Validation Set:</b></u> This set will be used to validate the reletationship developed by the model on the training data to check if the model is overfitting or underfitting or is perfect for the given data. With the result of this data we decide if we should show the test data to the model or to tune the hyperparameters or model architecture again. *[20% of total data]*<br>\n<u><b>Test Set:</b></u> This set is the final test to check if the model is perfect and is ready to go for the deployment. *[20% of total data]* ",
"_____no_output_____"
]
],
[
[
"train, validate, test = np.split(df.sample(frac=1, random_state=42), [int(.6*len(df)), int(.8*len(df))])",
"_____no_output_____"
],
[
"print(\"Training Data: \")\nprint(train)\nprint(\"Validation Data: \")\nprint(validate)\nprint(\"Test Data: \")\nprint(test)",
"Training Data: \n Label Pixel1 Pixel2 Pixel3 Pixel4 Pixel5 Pixel6 Pixel7 Pixel8 \\\n8268 66 241 241 241 241 241 241 242 242 \n2628 21 139 142 147 149 149 149 150 151 \n6107 48 185 187 189 191 192 193 194 195 \n3105 24 175 176 177 178 180 182 185 187 \n7840 63 152 153 153 154 154 155 155 156 \n... ... ... ... ... ... ... ... ... ... \n3176 25 192 190 190 191 190 189 190 193 \n5072 40 5 7 4 5 3 4 7 3 \n3359 26 22 18 15 15 16 16 18 20 \n7464 60 129 117 101 82 63 57 64 72 \n8158 66 185 187 192 197 198 198 202 208 \n\n Pixel9 ... Pixel9991 Pixel9992 Pixel9993 Pixel9994 Pixel9995 \\\n8268 242 ... 188 179 187 171 173 \n2628 150 ... 214 223 171 132 135 \n6107 192 ... 112 109 107 105 103 \n3105 187 ... 111 111 244 180 202 \n7840 158 ... 152 151 151 152 152 \n... ... ... ... ... ... ... ... \n3176 194 ... 183 184 183 182 182 \n5072 4 ... 5 5 5 5 5 \n3359 29 ... 213 210 211 211 198 \n7464 99 ... 36 33 31 38 33 \n8158 207 ... 69 102 114 99 82 \n\n Pixel9996 Pixel9997 Pixel9998 Pixel9999 Pixel10000 \n8268 177 183 189 182 200 \n2628 112 98 84 78 82 \n6107 101 100 99 98 98 \n3105 244 244 245 245 245 \n7840 153 151 150 149 150 \n... ... ... ... ... ... \n3176 182 182 181 180 179 \n5072 5 5 5 5 5 \n3359 181 177 185 188 190 \n7464 28 24 20 16 15 \n8158 67 57 52 55 61 \n\n[7300 rows x 10001 columns]\nValidation Data: \n Label Pixel1 Pixel2 Pixel3 Pixel4 Pixel5 Pixel6 Pixel7 Pixel8 \\\n10485 86 243 245 248 251 254 255 255 255 \n8908 72 110 114 119 122 125 128 132 135 \n10930 89 116 113 110 107 105 104 105 108 \n3289 26 100 119 130 124 110 102 112 130 \n10972 90 23 23 24 25 25 26 28 29 \n... ... ... ... ... ... ... ... ... ... \n6511 51 180 189 181 171 169 172 182 184 \n5439 43 171 171 171 171 171 171 171 171 \n11239 93 231 232 233 233 234 235 236 237 \n8964 73 142 145 146 144 143 144 145 144 \n11824 97 2 3 3 3 3 3 3 4 \n\n Pixel9 ... Pixel9991 Pixel9992 Pixel9993 Pixel9994 Pixel9995 \\\n10485 255 ... 181 126 137 124 17 \n8908 136 ... 163 161 159 158 153 \n10930 109 ... 157 156 156 156 157 \n3289 148 ... 10 14 15 16 17 \n10972 32 ... 38 37 38 37 34 \n... ... ... ... ... ... ... ... \n6511 185 ... 218 223 206 202 201 \n5439 171 ... 26 38 37 22 12 \n11239 235 ... 208 209 209 209 208 \n8964 147 ... 74 92 62 102 62 \n11824 3 ... 137 254 243 247 242 \n\n Pixel9996 Pixel9997 Pixel9998 Pixel9999 Pixel10000 \n10485 33 138 60 125 87 \n8908 147 140 127 96 60 \n10930 158 160 160 160 160 \n3289 17 15 21 64 78 \n10972 30 30 29 28 28 \n... ... ... ... ... ... \n6511 202 201 200 200 200 \n5439 23 20 19 13 7 \n11239 208 207 207 207 207 \n8964 122 40 129 36 147 \n11824 248 248 248 248 247 \n\n[2433 rows x 10001 columns]\nTest Data: \n Label Pixel1 Pixel2 Pixel3 Pixel4 Pixel5 Pixel6 Pixel7 Pixel8 \\\n5510 43 17 15 13 15 23 32 40 45 \n5330 42 65 49 44 36 36 80 133 147 \n4531 36 0 0 0 0 0 0 0 0 \n365 3 241 242 242 243 245 246 246 245 \n12144 99 0 0 0 0 0 0 0 0 \n... ... ... ... ... ... ... ... ... ... \n11964 98 67 79 78 59 72 74 75 78 \n5191 41 168 168 168 168 168 168 168 168 \n5390 42 35 35 35 35 36 36 36 36 \n860 7 167 164 149 155 170 185 203 201 \n7270 58 177 179 182 184 186 188 191 192 \n\n Pixel9 ... Pixel9991 Pixel9992 Pixel9993 Pixel9994 Pixel9995 \\\n5510 49 ... 119 110 122 115 97 \n5330 149 ... 169 168 210 238 237 \n4531 0 ... 202 207 208 203 207 \n365 246 ... 63 60 54 53 50 \n12144 0 ... 177 177 172 165 160 \n... ... ... ... ... ... ... ... \n11964 81 ... 59 61 62 62 59 \n5191 169 ... 89 103 158 154 172 \n5390 37 ... 59 65 52 35 34 \n860 204 ... 33 30 29 30 31 \n7270 195 ... 
105 94 102 101 104 \n\n Pixel9996 Pixel9997 Pixel9998 Pixel9999 Pixel10000 \n5510 105 107 102 71 127 \n5330 240 242 246 248 245 \n4531 206 205 211 212 109 \n365 43 43 47 37 28 \n12144 150 140 134 129 129 \n... ... ... ... ... ... \n11964 56 52 27 26 56 \n5191 194 178 149 174 185 \n5390 44 53 61 65 65 \n860 33 30 21 33 24 \n7270 100 106 105 105 105 \n\n[2434 rows x 10001 columns]\n"
]
],
[
[
"Seprating the labels from the training, validation and testing data. And converting to numpy arrays.",
"_____no_output_____"
]
],
[
[
"X_train = train.drop(['Label'], axis=1)\nY_train = train['Label']\n\nX_validate = validate.drop(['Label'], axis=1)\nY_validate = validate['Label']\n\nX_test = test.drop(['Label'], axis=1)\nY_test = test['Label']",
"_____no_output_____"
]
],
[
[
"## 2. Data preprocessing: ",
"_____no_output_____"
],
[
"Converting the input dataframes (X_train, X_validate, X_test) to numpy arrays for better processing and easier operations. ",
"_____no_output_____"
]
],
[
[
"X_train = X_train.to_numpy()\nX_validate = X_validate.to_numpy()\nX_test = X_test.to_numpy()",
"_____no_output_____"
]
],
[
[
"The values of pixel range from 0 to 255 based on the intensity of the colour, for easier and faster processing, we'll scale the values to be in between 0 and 1. ",
"_____no_output_____"
]
],
[
[
"X_train = X_train / 255.0\nX_validate = X_validate / 255.0\nX_test = X_test / 255.0",
"_____no_output_____"
]
],
[
[
"Reshaping the values of the 3 arrays to (size X 100 X 100 X 1) where size is the number of images in the array, 100 X 100 represents the resolution of the image and 1 represents the channel i.e. the image is a grayscale image. ",
"_____no_output_____"
]
],
[
[
"X_train = X_train.reshape(-1, 100, 100, 1)\nX_validate = X_validate.reshape(-1, 100, 100, 1)\nX_test = X_test.reshape(-1, 100, 100, 1)",
"_____no_output_____"
],
[
"print(Y_test.iloc[0])",
"43\n"
]
],
[
[
"Now the training input is preprocessed for better processing by the planned deep learning architecture. \nNow, we'll convert the labels to one-hot encoded vectors by using to_categorical() function from the utils module of keras package.",
"_____no_output_____"
]
],
[
[
"from keras.utils import to_categorical\n\nY_train = to_categorical(Y_train, num_classes=100)\nY_validate = to_categorical(Y_validate, num_classes=100)\nY_test = to_categorical(Y_test, num_classes=100)",
"_____no_output_____"
],
[
"print(Y_test[1,:])",
"[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0.]\n"
]
],
[
[
"## 3. Model Creation and Training:",
"_____no_output_____"
]
],
[
[
"from keras import models\nfrom keras.models import Sequential\nfrom keras import layers\nfrom keras.layers import Conv2D, MaxPool2D, Flatten, Dense\nfrom keras.optimizers import Adam\n\nmodel = Sequential()\nmodel.add(Conv2D(input_shape=(100,100,1),filters=64,kernel_size=(3,3),padding=\"same\", activation=\"relu\"))\nmodel.add(Conv2D(filters=64,kernel_size=(3,3),padding=\"same\", activation=\"relu\"))\nmodel.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))\nmodel.add(Conv2D(filters=128, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\nmodel.add(Conv2D(filters=128, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\nmodel.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))\nmodel.add(Conv2D(filters=256, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\nmodel.add(Conv2D(filters=256, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\nmodel.add(Conv2D(filters=256, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\nmodel.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))\nmodel.add(Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\nmodel.add(Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\nmodel.add(Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\nmodel.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))\nmodel.add(Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\nmodel.add(Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\nmodel.add(Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\nmodel.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))\n\n\nmodel.add(Flatten())\nmodel.add(Dense(units=4096,activation=\"relu\"))\nmodel.add(Dense(units=4096,activation=\"relu\"))\nmodel.add(Dense(units=100, activation=\"softmax\"))\n\nmodel.summary()",
"Model: \"sequential_5\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_32 (Conv2D) (None, 100, 100, 64) 640 \n_________________________________________________________________\nconv2d_33 (Conv2D) (None, 100, 100, 64) 36928 \n_________________________________________________________________\nmax_pooling2d_12 (MaxPooling (None, 50, 50, 64) 0 \n_________________________________________________________________\nconv2d_34 (Conv2D) (None, 50, 50, 128) 73856 \n_________________________________________________________________\nconv2d_35 (Conv2D) (None, 50, 50, 128) 147584 \n_________________________________________________________________\nmax_pooling2d_13 (MaxPooling (None, 25, 25, 128) 0 \n_________________________________________________________________\nconv2d_36 (Conv2D) (None, 25, 25, 256) 295168 \n_________________________________________________________________\nconv2d_37 (Conv2D) (None, 25, 25, 256) 590080 \n_________________________________________________________________\nconv2d_38 (Conv2D) (None, 25, 25, 256) 590080 \n_________________________________________________________________\nmax_pooling2d_14 (MaxPooling (None, 12, 12, 256) 0 \n_________________________________________________________________\nconv2d_39 (Conv2D) (None, 12, 12, 512) 1180160 \n_________________________________________________________________\nconv2d_40 (Conv2D) (None, 12, 12, 512) 2359808 \n_________________________________________________________________\nconv2d_41 (Conv2D) (None, 12, 12, 512) 2359808 \n_________________________________________________________________\nmax_pooling2d_15 (MaxPooling (None, 6, 6, 512) 0 \n_________________________________________________________________\nconv2d_42 (Conv2D) (None, 6, 6, 512) 2359808 \n_________________________________________________________________\nconv2d_43 (Conv2D) (None, 6, 6, 512) 2359808 \n_________________________________________________________________\nconv2d_44 (Conv2D) (None, 6, 6, 512) 2359808 \n_________________________________________________________________\nmax_pooling2d_16 (MaxPooling (None, 3, 3, 512) 0 \n_________________________________________________________________\nflatten_2 (Flatten) (None, 4608) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 4096) 18878464 \n_________________________________________________________________\ndense_3 (Dense) (None, 4096) 16781312 \n_________________________________________________________________\ndense_4 (Dense) (None, 100) 409700 \n=================================================================\nTotal params: 50,783,012\nTrainable params: 50,783,012\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"model.compile(optimizer=Adam(0.001),\n loss='categorical_crossentropy',\n metrics=['accuracy'])",
"_____no_output_____"
]
],
[
[
"These callbacks will help us in the model training",
"_____no_output_____"
]
],
[
[
"from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping\n\ncheckpoint = ModelCheckpoint(\"vgg16_1.h5\", monitor='val_acc', \n verbose=1, save_best_only=True, save_weights_only=False, mode='auto', save_freq=1)\nearly = EarlyStopping(monitor='val_acc', min_delta=0, patience=20, verbose=1, mode='auto')\n\nlearning_rate_reduction = ReduceLROnPlateau(monitor='val_loss', patience= 5, verbose=1, factor=0.25, min_lr=0.00001)",
"_____no_output_____"
]
],
[
[
"The following callback is for tensorboard to give a visualization of the training. ",
"_____no_output_____"
]
],
[
[
"import os\nimport datetime\n\nlog_dir = \"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\nos.mkdir(log_dir)\ntensorboard = tf.keras.callbacks.TensorBoard(log_dir='log_dir', histogram_freq=1, embeddings_freq=1)",
"_____no_output_____"
],
[
"model.fit(X_train, Y_train, steps_per_epoch=100, epochs = 10, batch_size=32, \n validation_data=(X_validate, Y_validate), verbose=1, \n callbacks=[learning_rate_reduction, tensorboard, early], shuffle=False)",
"_____no_output_____"
]
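,
[
"The held-out test set created earlier is not used above. A closing sketch (our own addition) of how it could be evaluated once training finishes:\n\n```python\n# Final check on the untouched test split\ntest_loss, test_acc = model.evaluate(X_test, Y_test, verbose=0)\nprint('Test accuracy: {:.4f}'.format(test_acc))\n```",
"_____no_output_____"
]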
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a18c85a9a5fbd8a07becfe0595da2dcc4f7965a
| 40,954 |
ipynb
|
Jupyter Notebook
|
scripts/Figure1.ipynb
|
shayenne/vocaldetection
|
1a5e9fbfe810cc8ed8c40ac1a9cd91183c63cda4
|
[
"MIT"
] | 1 |
2020-01-02T11:17:49.000Z
|
2020-01-02T11:17:49.000Z
|
scripts/Figure1.ipynb
|
shayenne/vocaldetection
|
1a5e9fbfe810cc8ed8c40ac1a9cd91183c63cda4
|
[
"MIT"
] | 15 |
2020-01-28T23:12:46.000Z
|
2022-03-12T00:09:58.000Z
|
scripts/Figure1.ipynb
|
shayenne/vocaldetection
|
1a5e9fbfe810cc8ed8c40ac1a9cd91183c63cda4
|
[
"MIT"
] | null | null | null | 219.005348 | 37,320 | 0.912145 |
[
[
[
"import os.path as op\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
],
[
"plt.style.use('ggplot')",
"_____no_output_____"
],
[
"import vocaldetection as sb\ndata_path = op.join(sb.__path__[0], 'data')",
"_____no_output_____"
],
[
"ortho_x, ortho_y, ortho_n = sb.transform_data(op.join(data_path, 'ortho.csv'))\npara_x, para_y, para_n = sb.transform_data(op.join(data_path, 'para.csv'))",
"_____no_output_____"
],
[
"model = sb.Model()",
"_____no_output_____"
],
[
"ortho_fit = model.fit(ortho_x, ortho_y)\npara_fit = model.fit(para_x, para_y)",
"_____no_output_____"
],
[
"ortho_fit.params",
"_____no_output_____"
],
[
"para_fit.params",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(1)\nx_predict = np.linspace(0, 1, 100)\nfor x, y, n in zip(ortho_x, ortho_y, ortho_n):\n ax.plot(x, y, 'bo', markersize=n)\n ax.plot(x_predict, ortho_fit.predict(x_predict), 'b')\n\nfor x,y,n in zip(para_x, para_y, para_n):\n ax.plot(x, y, 'go', markersize=n)\n ax.plot(x_predict, para_fit.predict(x_predict), 'g')\n\nax.set_xlabel('Contrast in interval 1')\nax.set_ylabel(\"Proportion answers '1'\")\nax.set_ylim([-0.1, 1.1])\nax.set_xlim([-0.1, 1.1])\nfig.set_size_inches([8,8])",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a18d4785e92010554f93014e6fa39013892e366
| 28,602 |
ipynb
|
Jupyter Notebook
|
tutorials/CausalityTools_tutorial_SMeasureTest.ipynb
|
bhannis/CausalityTools.jl
|
7a1d048e287b413e5588d0e8aefb54e65f6de400
|
[
"MIT"
] | 24 |
2020-06-11T01:51:15.000Z
|
2022-03-29T18:39:43.000Z
|
tutorials/CausalityTools_tutorial_SMeasureTest.ipynb
|
bhannis/CausalityTools.jl
|
7a1d048e287b413e5588d0e8aefb54e65f6de400
|
[
"MIT"
] | 27 |
2020-12-21T02:52:22.000Z
|
2022-01-31T10:40:08.000Z
|
tutorials/CausalityTools_tutorial_SMeasureTest.ipynb
|
bhannis/CausalityTools.jl
|
7a1d048e287b413e5588d0e8aefb54e65f6de400
|
[
"MIT"
] | 7 |
2020-09-27T08:56:28.000Z
|
2021-12-08T15:05:51.000Z
| 71.149254 | 821 | 0.578246 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a18d80bc87bd43b57606a8ec66973aba8fb0d18
| 183,039 |
ipynb
|
Jupyter Notebook
|
sentiment-network/Sentiment_Classification_Projects.ipynb
|
trung-gm/deep-learning
|
66f3fc87794d14933a58bb24c47958fd652a5afe
|
[
"MIT"
] | null | null | null |
sentiment-network/Sentiment_Classification_Projects.ipynb
|
trung-gm/deep-learning
|
66f3fc87794d14933a58bb24c47958fd652a5afe
|
[
"MIT"
] | null | null | null |
sentiment-network/Sentiment_Classification_Projects.ipynb
|
trung-gm/deep-learning
|
66f3fc87794d14933a58bb24c47958fd652a5afe
|
[
"MIT"
] | null | null | null | 17.950279 | 38,682 | 0.442572 |
[
[
[
"# Sentiment Classification & How To \"Frame Problems\" for a Neural Network\n\nby Andrew Trask\n\n- **Twitter**: @iamtrask\n- **Blog**: http://iamtrask.github.io",
"_____no_output_____"
],
[
"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\
n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\
n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n### What You Should Already Know\n\n- neural networks, forward and back-propagation\n- stochastic gradient descent\n- mean squared error\n- and train/test splits\n\n### Where to Get Help if You Need it\n- Re-watch previous Udacity Lectures\n- Leverage the recommended Course Reading Material - [Grokking Deep Learning](https://www.manning.com/books/grokking-deep-learning) (Check inside your classroom for a discount code)\n- Shoot me a tweet @iamtrask\n\n\n### Tutorial Outline:\n\n- Intro: The Importance of \"Framing a Problem\" (this lesson)\n\n- [Curate a Dataset](#lesson_1)\n- [Developing a \"Predictive Theory\"](#lesson_2)\n- [**PROJECT 1**: Quick Theory Validation](#project_1)\n\n\n- [Transforming Text to Numbers](#lesson_3)\n- [**PROJECT 2**: Creating the Input/Output Data](#project_2)\n\n\n- Putting it all together in a Neural Network (video only - nothing in notebook)\n- [**PROJECT 3**: Building our Neural Network](#project_3)\n\n\n- [Understanding Neural Noise](#lesson_4)\n- [**PROJECT 4**: Making Learning Faster by Reducing Noise](#project_4)\n\n\n- [Analyzing Inefficiencies in our Network](#lesson_5)\n- [**PROJECT 5**: Making our Network Train 
and Run Faster](#project_5)\n\n\n- [Further Noise Reduction](#lesson_6)\n- [**PROJECT 6**: Reducing Noise by Strategically Reducing the Vocabulary](#project_6)\n\n\n- [Analysis: What's going on in the weights?](#lesson_7)",
"_____no_output_____"
],
[
"# Lesson: Curate a Dataset<a id='lesson_1'></a>\nThe cells from here until Project 1 include code Andrew shows in the videos leading up to mini project 1. We've included them so you can run the code along with the videos without having to type in everything.",
"_____no_output_____"
]
],
[
[
"def pretty_print_review_and_label(i):\n print(labels[i] + \"\\t:\\t\" + reviews[i][:80] + \"...\")\n\ng = open('reviews.txt','r') # What we know!\nreviews = list(map(lambda x:x[:-1],g.readlines()))\ng.close()\n\ng = open('labels.txt','r') # What we WANT to know!\nlabels = list(map(lambda x:x[:-1].upper(),g.readlines()))\ng.close()",
"_____no_output_____"
]
],
[
[
"**Note:** The data in `reviews.txt` we're using has already been preprocessed a bit and contains only lower case characters. If we were working from raw data, where we didn't know it was all lower case, we would want to add a step here to convert it. That's so we treat different variations of the same word, like `The`, `the`, and `THE`, all the same way.",
"_____no_output_____"
]
],
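[
[
"For raw text, that normalization step might look like the following sketch (our own addition; not needed here because `reviews.txt` is already lower case):\n\n```python\n# Hypothetical normalization for raw reviews: lower-case everything so\n# 'The', 'the' and 'THE' are counted as the same word.\nreviews = [review.lower() for review in reviews]\n```",
"_____no_output_____"
]
],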
[
[
"len(reviews)",
"_____no_output_____"
],
[
"reviews[0]",
"_____no_output_____"
],
[
"labels[0]",
"_____no_output_____"
]
],
[
[
"# Lesson: Develop a Predictive Theory<a id='lesson_2'></a>",
"_____no_output_____"
]
],
[
[
"print(\"labels.txt \\t : \\t reviews.txt\\n\")\npretty_print_review_and_label(2137)\npretty_print_review_and_label(12816)\npretty_print_review_and_label(6267)\npretty_print_review_and_label(21934)\npretty_print_review_and_label(5297)\npretty_print_review_and_label(4998)",
"_____no_output_____"
]
],
[
[
"# Project 1: Quick Theory Validation<a id='project_1'></a>\n\nThere are multiple ways to implement these projects, but in order to get your code closer to what Andrew shows in his solutions, we've provided some hints and starter code throughout this notebook.\n\nYou'll find the [Counter](https://docs.python.org/2/library/collections.html#collections.Counter) class to be useful in this exercise, as well as the [numpy](https://docs.scipy.org/doc/numpy/reference/) library.",
"_____no_output_____"
]
],
[
[
"from collections import Counter\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"We'll create three `Counter` objects, one for words from postive reviews, one for words from negative reviews, and one for all the words.",
"_____no_output_____"
]
],
[
[
"# Create three Counter objects to store positive, negative and total counts\npositive_counts = Counter()\nnegative_counts = Counter()\ntotal_counts = Counter()",
"_____no_output_____"
]
],
[
[
"**TODO:** Examine all the reviews. For each word in a positive review, increase the count for that word in both your positive counter and the total words counter; likewise, for each word in a negative review, increase the count for that word in both your negative counter and the total words counter.\n\n**Note:** Throughout these projects, you should use `split(' ')` to divide a piece of text (such as a review) into individual words. If you use `split()` instead, you'll get slightly different results than what the videos and solutions show.",
"_____no_output_____"
]
],
[
[
"# TODO: Loop over all the words in all the reviews and increment the counts in the appropriate counter objects",
"_____no_output_____"
]
],
[
[
"Run the following two cells to list the words used in positive reviews and negative reviews, respectively, ordered from most to least commonly used. ",
"_____no_output_____"
]
],
[
[
"# Examine the counts of the most common words in positive reviews\npositive_counts.most_common()",
"_____no_output_____"
],
[
"# Examine the counts of the most common words in negative reviews\nnegative_counts.most_common()",
"_____no_output_____"
]
],
[
[
"As you can see, common words like \"the\" appear very often in both positive and negative reviews. Instead of finding the most common words in positive or negative reviews, what you really want are the words found in positive reviews more often than in negative reviews, and vice versa. To accomplish this, you'll need to calculate the **ratios** of word usage between positive and negative reviews.\n\n**TODO:** Check all the words you've seen and calculate the ratio of postive to negative uses and store that ratio in `pos_neg_ratios`. \n>Hint: the positive-to-negative ratio for a given word can be calculated with `positive_counts[word] / float(negative_counts[word]+1)`. Notice the `+1` in the denominator – that ensures we don't divide by zero for words that are only seen in positive reviews.",
"_____no_output_____"
]
],
[
[
"# Create Counter object to store positive/negative ratios\npos_neg_ratios = Counter()\n\n# TODO: Calculate the ratios of positive and negative uses of the most common words\n# Consider words to be \"common\" if they've been used at least 100 times",
"_____no_output_____"
]
],
[
[
"Examine the ratios you've calculated for a few words:",
"_____no_output_____"
]
],
[
[
"print(\"Pos-to-neg ratio for 'the' = {}\".format(pos_neg_ratios[\"the\"]))\nprint(\"Pos-to-neg ratio for 'amazing' = {}\".format(pos_neg_ratios[\"amazing\"]))\nprint(\"Pos-to-neg ratio for 'terrible' = {}\".format(pos_neg_ratios[\"terrible\"]))",
"_____no_output_____"
]
],
[
[
"Looking closely at the values you just calculated, we see the following:\n\n* Words that you would expect to see more often in positive reviews – like \"amazing\" – have a ratio greater than 1. The more skewed a word is toward postive, the farther from 1 its positive-to-negative ratio will be.\n* Words that you would expect to see more often in negative reviews – like \"terrible\" – have positive values that are less than 1. The more skewed a word is toward negative, the closer to zero its positive-to-negative ratio will be.\n* Neutral words, which don't really convey any sentiment because you would expect to see them in all sorts of reviews – like \"the\" – have values very close to 1. A perfectly neutral word – one that was used in exactly the same number of positive reviews as negative reviews – would be almost exactly 1. The `+1` we suggested you add to the denominator slightly biases words toward negative, but it won't matter because it will be a tiny bias and later we'll be ignoring words that are too close to neutral anyway.\n\nOk, the ratios tell us which words are used more often in postive or negative reviews, but the specific values we've calculated are a bit difficult to work with. A very positive word like \"amazing\" has a value above 4, whereas a very negative word like \"terrible\" has a value around 0.18. Those values aren't easy to compare for a couple of reasons:\n\n* Right now, 1 is considered neutral, but the absolute value of the postive-to-negative rations of very postive words is larger than the absolute value of the ratios for the very negative words. So there is no way to directly compare two numbers and see if one word conveys the same magnitude of positive sentiment as another word conveys negative sentiment. So we should center all the values around netural so the absolute value fro neutral of the postive-to-negative ratio for a word would indicate how much sentiment (positive or negative) that word conveys.\n* When comparing absolute values it's easier to do that around zero than one. \n\nTo fix these issues, we'll convert all of our ratios to new values using logarithms.\n\n**TODO:** Go through all the ratios you calculated and convert their values using the following formulas:\n> * For any postive words, convert the ratio using `np.log(ratio)`\n> * For any negative words, convert the ratio using `-np.log(1/(ratio + 0.01))`\n\nThat second equation may look strange, but what it's doing is dividing one by a very small number, which will produce a larger positive number. Then, it takes the `log` of that, which produces numbers similar to the ones for the postive words. Finally, we negate the values by adding that minus sign up front. In the end, extremely positive and extremely negative words will have positive-to-negative ratios with similar magnitudes but oppositite signs.",
"_____no_output_____"
]
],
[
[
"# TODO: Convert ratios to logs",
"_____no_output_____"
]
],
[
[
"Examine the new ratios you've calculated for the same words from before:",
"_____no_output_____"
]
],
[
[
"print(\"Pos-to-neg ratio for 'the' = {}\".format(pos_neg_ratios[\"the\"]))\nprint(\"Pos-to-neg ratio for 'amazing' = {}\".format(pos_neg_ratios[\"amazing\"]))\nprint(\"Pos-to-neg ratio for 'terrible' = {}\".format(pos_neg_ratios[\"terrible\"]))",
"_____no_output_____"
]
],
[
[
"If everything worked, now you should see neutral words with values close to zero. In this case, \"the\" is near zero but slightly positive, so it was probably used in more positive reviews than negative reviews. But look at \"amazing\"'s ratio - it's above `1`, showing it is clearly a word with positive sentiment. And \"terrible\" has a similar score, but in the opposite direction, so it's below `-1`. It's now clear that both of these words are associated with specific, opposing sentiments.\n\nNow run the following cells to see more ratios. \n\nThe first cell displays all the words, ordered by how associated they are with postive reviews. (Your notebook will most likely truncate the output so you won't actually see *all* the words in the list.)\n\nThe second cell displays the 30 words most associated with negative reviews by reversing the order of the first list and then looking at the first 30 words. (If you want the second cell to display all the words, ordered by how associated they are with negative reviews, you could just write `reversed(pos_neg_ratios.most_common())`.)\n\nYou should continue to see values similar to the earlier ones we checked – neutral words will be close to `0`, words will get more positive as their ratios approach and go above `1`, and words will get more negative as their ratios approach and go below `-1`. That's why we decided to use the logs instead of the raw ratios.",
"_____no_output_____"
]
],
[
[
"# words most frequently seen in a review with a \"POSITIVE\" label\npos_neg_ratios.most_common()",
"_____no_output_____"
],
[
"# words most frequently seen in a review with a \"NEGATIVE\" label\nlist(reversed(pos_neg_ratios.most_common()))[0:30]\n\n# Note: Above is the code Andrew uses in his solution video, \n# so we've included it here to avoid confusion.\n# If you explore the documentation for the Counter class, \n# you will see you could also find the 30 least common\n# words like this: pos_neg_ratios.most_common()[:-31:-1]",
"_____no_output_____"
]
],
[
[
"# End of Project 1. \n## Watch the next video to see Andrew's solution, then continue on to the next lesson.\n\n# Transforming Text into Numbers<a id='lesson_3'></a>\nThe cells here include code Andrew shows in the next video. We've included it so you can run the code along with the video without having to type in everything.",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image\n\nreview = \"This was a horrible, terrible movie.\"\n\nImage(filename='sentiment_network.png')",
"_____no_output_____"
],
[
"review = \"The movie was excellent\"\n\nImage(filename='sentiment_network_pos.png')",
"_____no_output_____"
]
],
[
[
"# Project 2: Creating the Input/Output Data<a id='project_2'></a>\n\n**TODO:** Create a [set](https://docs.python.org/3/tutorial/datastructures.html#sets) named `vocab` that contains every word in the vocabulary.",
"_____no_output_____"
]
],
[
[
"# TODO: Create set named \"vocab\" containing all of the words from all of the reviews\nvocab = None",
"_____no_output_____"
]
],
[
[
"Run the following cell to check your vocabulary size. If everything worked correctly, it should print **74074**",
"_____no_output_____"
]
],
[
[
"vocab_size = len(vocab)\nprint(vocab_size)",
"_____no_output_____"
]
],
[
[
"Take a look at the following image. It represents the layers of the neural network you'll be building throughout this notebook. `layer_0` is the input layer, `layer_1` is a hidden layer, and `layer_2` is the output layer.",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image\nImage(filename='sentiment_network_2.png')",
"_____no_output_____"
]
],
[
[
"**TODO:** Create a numpy array called `layer_0` and initialize it to all zeros. You will find the [zeros](https://docs.scipy.org/doc/numpy/reference/generated/numpy.zeros.html) function particularly helpful here. Be sure you create `layer_0` as a 2-dimensional matrix with 1 row and `vocab_size` columns. ",
"_____no_output_____"
]
],
[
[
"# TODO: Create layer_0 matrix with dimensions 1 by vocab_size, initially filled with zeros\nlayer_0 = None",
"_____no_output_____"
]
],
[
[
"Run the following cell. It should display `(1, 74074)`",
"_____no_output_____"
]
],
[
[
"layer_0.shape",
"_____no_output_____"
],
[
"from IPython.display import Image\nImage(filename='sentiment_network.png')",
"_____no_output_____"
]
],
[
[
"`layer_0` contains one entry for every word in the vocabulary, as shown in the above image. We need to make sure we know the index of each word, so run the following cell to create a lookup table that stores the index of every word.",
"_____no_output_____"
]
],
[
[
"# Create a dictionary of words in the vocabulary mapped to index positions\n# (to be used in layer_0)\nword2index = {}\nfor i,word in enumerate(vocab):\n word2index[word] = i\n \n# display the map of words to indices\nword2index",
"_____no_output_____"
]
],
[
[
"**TODO:** Complete the implementation of `update_input_layer`. It should count \n how many times each word is used in the given review, and then store\n those counts at the appropriate indices inside `layer_0`.",
"_____no_output_____"
]
],
[
[
"def update_input_layer(review):\n \"\"\" Modify the global layer_0 to represent the vector form of review.\n The element at a given index of layer_0 should represent\n how many times the given word occurs in the review.\n Args:\n review(string) - the string of the review\n Returns:\n None\n \"\"\"\n global layer_0\n # clear out previous state by resetting the layer to be all 0s\n layer_0 *= 0\n \n # TODO: count how many times each word is used in the given review and store the results in layer_0 ",
"_____no_output_____"
]
],
[
[
"Run the following cell to test updating the input layer with the first review. The indices assigned may not be the same as in the solution, but hopefully you'll see some non-zero values in `layer_0`. ",
"_____no_output_____"
]
],
[
[
"update_input_layer(reviews[0])\nlayer_0",
"_____no_output_____"
]
],
[
[
"**TODO:** Complete the implementation of `get_target_for_labels`. It should return `0` or `1`, \n depending on whether the given label is `NEGATIVE` or `POSITIVE`, respectively.",
"_____no_output_____"
]
],
[
[
"def get_target_for_label(label):\n \"\"\"Convert a label to `0` or `1`.\n Args:\n label(string) - Either \"POSITIVE\" or \"NEGATIVE\".\n Returns:\n `0` or `1`.\n \"\"\"\n # TODO: Your code here",
"_____no_output_____"
]
],
[
[
"Run the following two cells. They should print out`'POSITIVE'` and `1`, respectively.",
"_____no_output_____"
]
],
[
[
"labels[0]",
"_____no_output_____"
],
[
"get_target_for_label(labels[0])",
"_____no_output_____"
]
],
[
[
"Run the following two cells. They should print out `'NEGATIVE'` and `0`, respectively.",
"_____no_output_____"
]
],
[
[
"labels[1]",
"_____no_output_____"
],
[
"get_target_for_label(labels[1])",
"_____no_output_____"
]
],
[
[
"# End of Project 2. \n## Watch the next video to see Andrew's solution, then continue on to the next lesson.",
"_____no_output_____"
],
[
"# Project 3: Building a Neural Network<a id='project_3'></a>",
"_____no_output_____"
],
[
"**TODO:** We've included the framework of a class called `SentimentNetork`. Implement all of the items marked `TODO` in the code. These include doing the following:\n- Create a basic neural network much like the networks you've seen in earlier lessons and in Project 1, with an input layer, a hidden layer, and an output layer. \n- Do **not** add a non-linearity in the hidden layer. That is, do not use an activation function when calculating the hidden layer outputs.\n- Re-use the code from earlier in this notebook to create the training data (see `TODO`s in the code)\n- Implement the `pre_process_data` function to create the vocabulary for our training data generating functions\n- Ensure `train` trains over the entire corpus",
"_____no_output_____"
],
[
"### Where to Get Help if You Need it\n- Re-watch earlier Udacity lectures\n- Chapters 3-5 - [Grokking Deep Learning](https://www.manning.com/books/grokking-deep-learning) - (Check inside your classroom for a discount code)",
"_____no_output_____"
]
],
[
[
"import time\nimport sys\nimport numpy as np\n\n# Encapsulate our neural network in a class\nclass SentimentNetwork:\n def __init__(self, reviews, labels, hidden_nodes = 10, learning_rate = 0.1):\n \"\"\"Create a SentimenNetwork with the given settings\n Args:\n reviews(list) - List of reviews used for training\n labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews\n hidden_nodes(int) - Number of nodes to create in the hidden layer\n learning_rate(float) - Learning rate to use while training\n \n \"\"\"\n # Assign a seed to our random number generator to ensure we get\n # reproducable results during development \n np.random.seed(1)\n\n # process the reviews and their associated labels so that everything\n # is ready for training\n self.pre_process_data(reviews, labels)\n \n # Build the network to have the number of hidden nodes and the learning rate that\n # were passed into this initializer. Make the same number of input nodes as\n # there are vocabulary words and create a single output node.\n self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)\n\n def pre_process_data(self, reviews, labels):\n \n review_vocab = set()\n # TODO: populate review_vocab with all of the words in the given reviews\n # Remember to split reviews into individual words \n # using \"split(' ')\" instead of \"split()\".\n \n # Convert the vocabulary set to a list so we can access words via indices\n self.review_vocab = list(review_vocab)\n \n label_vocab = set()\n # TODO: populate label_vocab with all of the words in the given labels.\n # There is no need to split the labels because each one is a single word.\n \n # Convert the label vocabulary set to a list so we can access labels via indices\n self.label_vocab = list(label_vocab)\n \n # Store the sizes of the review and label vocabularies.\n self.review_vocab_size = len(self.review_vocab)\n self.label_vocab_size = len(self.label_vocab)\n \n # Create a dictionary of words in the vocabulary mapped to index positions\n self.word2index = {}\n # TODO: populate self.word2index with indices for all the words in self.review_vocab\n # like you saw earlier in the notebook\n \n # Create a dictionary of labels mapped to index positions\n self.label2index = {}\n # TODO: do the same thing you did for self.word2index and self.review_vocab, \n # but for self.label2index and self.label_vocab instead\n \n \n def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):\n # Store the number of nodes in input, hidden, and output layers.\n self.input_nodes = input_nodes\n self.hidden_nodes = hidden_nodes\n self.output_nodes = output_nodes\n\n # Store the learning rate\n self.learning_rate = learning_rate\n\n # Initialize weights\n \n # TODO: initialize self.weights_0_1 as a matrix of zeros. These are the weights between\n # the input layer and the hidden layer.\n self.weights_0_1 = None\n \n # TODO: initialize self.weights_1_2 as a matrix of random values. \n # These are the weights between the hidden layer and the output layer.\n self.weights_1_2 = None\n \n # TODO: Create the input layer, a two-dimensional matrix with shape \n # 1 x input_nodes, with all values initialized to zero\n self.layer_0 = np.zeros((1,input_nodes))\n \n \n def update_input_layer(self,review):\n # TODO: You can copy most of the code you wrote for update_input_layer \n # earlier in this notebook. 
\n        #\n        #       However, MAKE SURE YOU CHANGE ALL VARIABLES TO REFERENCE\n        #       THE VERSIONS STORED IN THIS OBJECT, NOT THE GLOBAL OBJECTS.\n        #       For example, replace \"layer_0 *= 0\" with \"self.layer_0 *= 0\"\n        pass\n    \n    def get_target_for_label(self,label):\n        # TODO: Copy the code you wrote for get_target_for_label \n        #       earlier in this notebook. \n        pass\n    \n    def sigmoid(self,x):\n        # TODO: Return the result of calculating the sigmoid activation function\n        #       shown in the lectures\n        pass\n    \n    def sigmoid_output_2_derivative(self,output):\n        # TODO: Return the derivative of the sigmoid activation function, \n        #       where \"output\" is the original output from the sigmoid function \n        pass\n\n    def train(self, training_reviews, training_labels):\n        \n        # make sure we have a matching number of reviews and labels\n        assert(len(training_reviews) == len(training_labels))\n        \n        # Keep track of correct predictions to display accuracy during training \n        correct_so_far = 0\n        \n        # Remember when we started for printing time statistics\n        start = time.time()\n\n        # loop through all the given reviews and run a forward and backward pass,\n        # updating weights for every item\n        for i in range(len(training_reviews)):\n            \n            # TODO: Get the next review and its correct label\n            \n            # TODO: Implement the forward pass through the network. \n            #       That means use the given review to update the input layer, \n            #       then calculate values for the hidden layer,\n            #       and finally calculate the output layer.\n            # \n            #       Do not use an activation function for the hidden layer,\n            #       but use the sigmoid activation function for the output layer.\n            \n            # TODO: Implement the back propagation pass here. \n            #       That means calculate the error for the forward pass's prediction\n            #       and update the weights in the network according to their\n            #       contributions toward the error, as calculated via the\n            #       gradient descent and back propagation algorithms you \n            #       learned in class.\n            \n            # TODO: Keep track of correct predictions. To determine if the prediction was\n            #       correct, check that the absolute value of the output error \n            #       is less than 0.5. If so, add one to the correct_so_far count.\n            \n            # For debug purposes, print out our prediction accuracy and speed \n            # throughout the training process. \n\n            elapsed_time = float(time.time() - start)\n            reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0\n            \n            sys.stdout.write(\"\\rProgress:\" + str(100 * i/float(len(training_reviews)))[:4] \\\n                             + \"% Speed(reviews/sec):\" + str(reviews_per_second)[0:5] \\\n                             + \" #Correct:\" + str(correct_so_far) + \" #Trained:\" + str(i+1) \\\n                             + \" Training Accuracy:\" + str(correct_so_far * 100 / float(i+1))[:4] + \"%\")\n            if(i % 2500 == 0):\n                print(\"\")\n    \n    def test(self, testing_reviews, testing_labels):\n        \"\"\"\n        Attempts to predict the labels for the given testing_reviews,\n        and uses the testing_labels to calculate the accuracy of those predictions.\n        \"\"\"\n        \n        # keep track of how many correct predictions we make\n        correct = 0\n\n        # we'll time how many predictions per second we make\n        start = time.time()\n\n        # Loop through each of the given reviews and call run to predict\n        # its label. \n        for i in range(len(testing_reviews)):\n            pred = self.run(testing_reviews[i])\n            if(pred == testing_labels[i]):\n                correct += 1\n            \n            # For debug purposes, print out our prediction accuracy and speed \n            # throughout the prediction process. 
\n\n elapsed_time = float(time.time() - start)\n reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0\n \n sys.stdout.write(\"\\rProgress:\" + str(100 * i/float(len(testing_reviews)))[:4] \\\n + \"% Speed(reviews/sec):\" + str(reviews_per_second)[0:5] \\\n + \" #Correct:\" + str(correct) + \" #Tested:\" + str(i+1) \\\n + \" Testing Accuracy:\" + str(correct * 100 / float(i+1))[:4] + \"%\")\n \n def run(self, review):\n \"\"\"\n Returns a POSITIVE or NEGATIVE prediction for the given review.\n \"\"\"\n # TODO: Run a forward pass through the network, like you did in the\n # \"train\" function. That means use the given review to \n # update the input layer, then calculate values for the hidden layer,\n # and finally calculate the output layer.\n #\n # Note: The review passed into this function for prediction \n # might come from anywhere, so you should convert it \n # to lower case prior to using it.\n \n # TODO: The output layer should now contain a prediction. \n # Return `POSITIVE` for predictions greater-than-or-equal-to `0.5`, \n # and `NEGATIVE` otherwise.\n pass\n",
"_____no_output_____"
]
],
[
[
"Run the following cell to create a `SentimentNetwork` that will train on all but the last 1000 reviews (we're saving those for testing). Here we use a learning rate of `0.1`.",
"_____no_output_____"
]
],
[
[
"mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)",
"_____no_output_____"
]
],
[
[
"Run the following cell to test the network's performance against the last 1000 reviews (the ones we held out from our training set). \n\n**We have not trained the model yet, so the results should be about 50% as it will just be guessing and there are only two possible values to choose from.**",
"_____no_output_____"
]
],
[
[
"mlp.test(reviews[-1000:],labels[-1000:])",
"_____no_output_____"
]
],
[
[
"Run the following cell to actually train the network. During training, it will display the model's accuracy repeatedly as it trains so you can see how well it's doing.",
"_____no_output_____"
]
],
[
[
"mlp.train(reviews[:-1000],labels[:-1000])",
"_____no_output_____"
]
],
[
[
"That most likely didn't train very well. Part of the reason may be because the learning rate is too high. Run the following cell to recreate the network with a smaller learning rate, `0.01`, and then train the new network.",
"_____no_output_____"
]
],
[
[
"mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.01)\nmlp.train(reviews[:-1000],labels[:-1000])",
"_____no_output_____"
]
],
[
[
"That probably wasn't much different. Run the following cell to recreate the network one more time with an even smaller learning rate, `0.001`, and then train the new network.",
"_____no_output_____"
]
],
[
[
"mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.001)\nmlp.train(reviews[:-1000],labels[:-1000])",
"_____no_output_____"
]
],
[
[
"With a learning rate of `0.001`, the network should finall have started to improve during training. It's still not very good, but it shows that this solution has potential. We will improve it in the next lesson.",
"_____no_output_____"
],
[
"# End of Project 3. \n## Watch the next video to see Andrew's solution, then continue on to the next lesson.",
"_____no_output_____"
],
[
"# Understanding Neural Noise<a id='lesson_4'></a>\n\nThe following cells include includes the code Andrew shows in the next video. We've included it here so you can run the cells along with the video without having to type in everything.",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image\nImage(filename='sentiment_network.png')",
"_____no_output_____"
],
[
"def update_input_layer(review):\n \n global layer_0\n \n # clear out previous state, reset the layer to be all 0s\n layer_0 *= 0\n for word in review.split(\" \"):\n layer_0[0][word2index[word]] += 1\n\nupdate_input_layer(reviews[0])",
"_____no_output_____"
],
[
"layer_0",
"_____no_output_____"
],
[
"review_counter = Counter()",
"_____no_output_____"
],
[
"for word in reviews[0].split(\" \"):\n review_counter[word] += 1",
"_____no_output_____"
],
[
"review_counter.most_common()",
"_____no_output_____"
]
],
[
[
"# Project 4: Reducing Noise in Our Input Data<a id='project_4'></a>\n\n**TODO:** Attempt to reduce the noise in the input data like Andrew did in the previous video. Specifically, do the following:\n* Copy the `SentimentNetwork` class you created earlier into the following cell.\n* Modify `update_input_layer` so it does not count how many times each word is used, but rather just stores whether or not a word was used. ",
"_____no_output_____"
]
],
[
[
"# TODO: -Copy the SentimentNetwork class from Projet 3 lesson\n# -Modify it to reduce noise, like in the video ",
"_____no_output_____"
]
],
[
[
"Run the following cell to recreate the network and train it. Notice we've gone back to the higher learning rate of `0.1`.",
"_____no_output_____"
]
],
[
[
"mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)\nmlp.train(reviews[:-1000],labels[:-1000])",
"_____no_output_____"
]
],
[
[
"That should have trained much better than the earlier attempts. It's still not wonderful, but it should have improved dramatically. Run the following cell to test your model with 1000 predictions.",
"_____no_output_____"
]
],
[
[
"mlp.test(reviews[-1000:],labels[-1000:])",
"_____no_output_____"
]
],
[
[
"# End of Project 4. \n## Andrew's solution was actually in the previous video, so rewatch that video if you had any problems with that project. Then continue on to the next lesson.\n# Analyzing Inefficiencies in our Network<a id='lesson_5'></a>\nThe following cells include the code Andrew shows in the next video. We've included it here so you can run the cells along with the video without having to type in everything.",
"_____no_output_____"
]
],
[
[
"Image(filename='sentiment_network_sparse.png')",
"_____no_output_____"
],
[
"layer_0 = np.zeros(10)",
"_____no_output_____"
],
[
"layer_0",
"_____no_output_____"
],
[
"layer_0[4] = 1\nlayer_0[9] = 1",
"_____no_output_____"
],
[
"layer_0",
"_____no_output_____"
],
[
"weights_0_1 = np.random.randn(10,5)",
"_____no_output_____"
],
[
"layer_0.dot(weights_0_1)",
"_____no_output_____"
],
[
"indices = [4,9]",
"_____no_output_____"
],
[
"layer_1 = np.zeros(5)",
"_____no_output_____"
],
[
"for index in indices:\n layer_1 += (1 * weights_0_1[index])",
"_____no_output_____"
],
[
"layer_1",
"_____no_output_____"
],
[
"Image(filename='sentiment_network_sparse_2.png')",
"_____no_output_____"
],
[
"layer_1 = np.zeros(5)",
"_____no_output_____"
],
[
"for index in indices:\n layer_1 += (weights_0_1[index])",
"_____no_output_____"
],
[
"layer_1",
"_____no_output_____"
]
],
[
[
"# Project 5: Making our Network More Efficient<a id='project_5'></a>\n**TODO:** Make the `SentimentNetwork` class more efficient by eliminating unnecessary multiplications and additions that occur during forward and backward propagation. To do that, you can do the following:\n* Copy the `SentimentNetwork` class from the previous project into the following cell.\n* Remove the `update_input_layer` function - you will not need it in this version.\n* Modify `init_network`:\n>* You no longer need a separate input layer, so remove any mention of `self.layer_0`\n>* You will be dealing with the old hidden layer more directly, so create `self.layer_1`, a two-dimensional matrix with shape 1 x hidden_nodes, with all values initialized to zero\n* Modify `train`:\n>* Change the name of the input parameter `training_reviews` to `training_reviews_raw`. This will help with the next step.\n>* At the beginning of the function, you'll want to preprocess your reviews to convert them to a list of indices (from `word2index`) that are actually used in the review. This is equivalent to what you saw in the video when Andrew set specific indices to 1. Your code should create a local `list` variable named `training_reviews` that should contain a `list` for each review in `training_reviews_raw`. Those lists should contain the indices for words found in the review.\n>* Remove call to `update_input_layer`\n>* Use `self`'s `layer_1` instead of a local `layer_1` object.\n>* In the forward pass, replace the code that updates `layer_1` with new logic that only adds the weights for the indices used in the review.\n>* When updating `weights_0_1`, only update the individual weights that were used in the forward pass.\n* Modify `run`:\n>* Remove call to `update_input_layer` \n>* Use `self`'s `layer_1` instead of a local `layer_1` object.\n>* Much like you did in `train`, you will need to pre-process the `review` so you can work with word indices, then update `layer_1` by adding weights for the indices used in the review.",
"_____no_output_____"
]
],
[
[
"# TODO: -Copy the SentimentNetwork class from Project 4 lesson\n# -Modify it according to the above instructions ",
"_____no_output_____"
]
],
[
[
"Run the following cell to recreate the network and train it once again.",
"_____no_output_____"
]
],
[
[
"mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)\nmlp.train(reviews[:-1000],labels[:-1000])",
"_____no_output_____"
]
],
[
[
"That should have trained much better than the earlier attempts. Run the following cell to test your model with 1000 predictions.",
"_____no_output_____"
]
],
[
[
"mlp.test(reviews[-1000:],labels[-1000:])",
"_____no_output_____"
]
],
[
[
"# End of Project 5. \n## Watch the next video to see Andrew's solution, then continue on to the next lesson.\n# Further Noise Reduction<a id='lesson_6'></a>",
"_____no_output_____"
]
],
[
[
"Image(filename='sentiment_network_sparse_2.png')",
"_____no_output_____"
],
[
"# words most frequently seen in a review with a \"POSITIVE\" label\npos_neg_ratios.most_common()",
"_____no_output_____"
],
[
"# words most frequently seen in a review with a \"NEGATIVE\" label\nlist(reversed(pos_neg_ratios.most_common()))[0:30]",
"_____no_output_____"
],
[
"from bokeh.models import ColumnDataSource, LabelSet\nfrom bokeh.plotting import figure, show, output_file\nfrom bokeh.io import output_notebook\noutput_notebook()",
"_____no_output_____"
],
[
"hist, edges = np.histogram(list(map(lambda x:x[1],pos_neg_ratios.most_common())), density=True, bins=100, normed=True)\n\np = figure(tools=\"pan,wheel_zoom,reset,save\",\n toolbar_location=\"above\",\n title=\"Word Positive/Negative Affinity Distribution\")\np.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color=\"#555555\")\nshow(p)",
"_____no_output_____"
],
[
"frequency_frequency = Counter()\n\nfor word, cnt in total_counts.most_common():\n frequency_frequency[cnt] += 1",
"_____no_output_____"
],
[
"hist, edges = np.histogram(list(map(lambda x:x[1],frequency_frequency.most_common())), density=True, bins=100, normed=True)\n\np = figure(tools=\"pan,wheel_zoom,reset,save\",\n toolbar_location=\"above\",\n title=\"The frequency distribution of the words in our corpus\")\np.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color=\"#555555\")\nshow(p)",
"_____no_output_____"
]
],
[
[
"# Project 6: Reducing Noise by Strategically Reducing the Vocabulary<a id='project_6'></a>\n\n**TODO:** Improve `SentimentNetwork`'s performance by reducing more noise in the vocabulary. Specifically, do the following:\n* Copy the `SentimentNetwork` class from the previous project into the following cell.\n* Modify `pre_process_data`:\n>* Add two additional parameters: `min_count` and `polarity_cutoff`\n>* Calculate the positive-to-negative ratios of words used in the reviews. (You can use code you've written elsewhere in the notebook, but we are moving it into the class like we did with other helper code earlier.)\n>* Andrew's solution only calculates a postive-to-negative ratio for words that occur at least 50 times. This keeps the network from attributing too much sentiment to rarer words. You can choose to add this to your solution if you would like. \n>* Change so words are only added to the vocabulary if they occur in the vocabulary more than `min_count` times.\n>* Change so words are only added to the vocabulary if the absolute value of their postive-to-negative ratio is at least `polarity_cutoff`\n* Modify `__init__`:\n>* Add the same two parameters (`min_count` and `polarity_cutoff`) and use them when you call `pre_process_data`",
"_____no_output_____"
]
],
[
[
"# TODO: -Copy the SentimentNetwork class from Project 5 lesson\n# -Modify it according to the above instructions ",
"_____no_output_____"
]
],
[
[
"Run the following cell to train your network with a small polarity cutoff.",
"_____no_output_____"
]
],
[
[
"mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.05,learning_rate=0.01)\nmlp.train(reviews[:-1000],labels[:-1000])",
"_____no_output_____"
]
],
[
[
"And run the following cell to test it's performance. It should be ",
"_____no_output_____"
]
],
[
[
"mlp.test(reviews[-1000:],labels[-1000:])",
"_____no_output_____"
]
],
[
[
"Run the following cell to train your network with a much larger polarity cutoff.",
"_____no_output_____"
]
],
[
[
"mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.8,learning_rate=0.01)\nmlp.train(reviews[:-1000],labels[:-1000])",
"_____no_output_____"
]
],
[
[
"And run the following cell to test it's performance.",
"_____no_output_____"
]
],
[
[
"mlp.test(reviews[-1000:],labels[-1000:])",
"_____no_output_____"
]
],
[
[
"# End of Project 6. \n## Watch the next video to see Andrew's solution, then continue on to the next lesson.",
"_____no_output_____"
],
[
"# Analysis: What's Going on in the Weights?<a id='lesson_7'></a>",
"_____no_output_____"
]
],
[
[
"mlp_full = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=0,polarity_cutoff=0,learning_rate=0.01)",
"_____no_output_____"
],
[
"mlp_full.train(reviews[:-1000],labels[:-1000])",
"_____no_output_____"
],
[
"Image(filename='sentiment_network_sparse.png')",
"_____no_output_____"
],
[
"def get_most_similar_words(focus = \"horrible\"):\n most_similar = Counter()\n\n for word in mlp_full.word2index.keys():\n most_similar[word] = np.dot(mlp_full.weights_0_1[mlp_full.word2index[word]],mlp_full.weights_0_1[mlp_full.word2index[focus]])\n \n return most_similar.most_common()",
"_____no_output_____"
],
[
"get_most_similar_words(\"excellent\")",
"_____no_output_____"
],
[
"get_most_similar_words(\"terrible\")",
"_____no_output_____"
],
[
"import matplotlib.colors as colors\n\nwords_to_visualize = list()\nfor word, ratio in pos_neg_ratios.most_common(500):\n if(word in mlp_full.word2index.keys()):\n words_to_visualize.append(word)\n \nfor word, ratio in list(reversed(pos_neg_ratios.most_common()))[0:500]:\n if(word in mlp_full.word2index.keys()):\n words_to_visualize.append(word)",
"_____no_output_____"
],
[
"pos = 0\nneg = 0\n\ncolors_list = list()\nvectors_list = list()\nfor word in words_to_visualize:\n if word in pos_neg_ratios.keys():\n vectors_list.append(mlp_full.weights_0_1[mlp_full.word2index[word]])\n if(pos_neg_ratios[word] > 0):\n pos+=1\n colors_list.append(\"#00ff00\")\n else:\n neg+=1\n colors_list.append(\"#000000\")",
"_____no_output_____"
],
[
"from sklearn.manifold import TSNE\ntsne = TSNE(n_components=2, random_state=0)\nwords_top_ted_tsne = tsne.fit_transform(vectors_list)",
"_____no_output_____"
],
[
"p = figure(tools=\"pan,wheel_zoom,reset,save\",\n toolbar_location=\"above\",\n title=\"vector T-SNE for most polarized words\")\n\nsource = ColumnDataSource(data=dict(x1=words_top_ted_tsne[:,0],\n x2=words_top_ted_tsne[:,1],\n names=words_to_visualize,\n color=colors_list))\n\np.scatter(x=\"x1\", y=\"x2\", size=8, source=source, fill_color=\"color\")\n\nword_labels = LabelSet(x=\"x1\", y=\"x2\", text=\"names\", y_offset=6,\n text_font_size=\"8pt\", text_color=\"#555555\",\n source=source, text_align='center')\np.add_layout(word_labels)\n\nshow(p)\n\n# green indicates positive words, black indicates negative words",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a18d8f2471ff19b0c56f266cbdfaf8fbf975428
| 172,495 |
ipynb
|
Jupyter Notebook
|
CNN.ipynb
|
JSerowik/CPE646_Project
|
b110d6fef7a4c501bcdc326b0027ada45a725521
|
[
"MIT"
] | null | null | null |
CNN.ipynb
|
JSerowik/CPE646_Project
|
b110d6fef7a4c501bcdc326b0027ada45a725521
|
[
"MIT"
] | null | null | null |
CNN.ipynb
|
JSerowik/CPE646_Project
|
b110d6fef7a4c501bcdc326b0027ada45a725521
|
[
"MIT"
] | null | null | null | 227.26614 | 65,112 | 0.876976 |
[
[
[
"# CPE 646 Final Project\n## Live Memetic Detection",
"_____no_output_____"
]
],
[
[
"import os\nfrom PIL import Image\nimport numpy as np\nfrom numpy import *\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nfrom keras.optimizers import SGD,RMSprop,adam\nfrom keras.utils import np_utils\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.optimizers import Adam\nfrom keras.preprocessing.image import ImageDataGenerator\n\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split",
"Using TensorFlow backend.\n"
],
[
"# input image dimensions\nimg_rows = 150\nimg_cols = 150\n\n# number of channels\nimg_channels = 1\n\npath1 = './images/train' #path of folder of images \npath2 = './data/train' #path of folder to save images \n\nlisting = os.listdir(path1)\nnum_samples=size(listing)\nprint(num_samples)\n\n# Resize and grey images before saving them in data folder\nfor file in listing:\n im = Image.open(path1 + '/' + file) \n img = im.resize((img_rows,img_cols))\n gray = img.convert('L') \n gray.save(path2 +'/' + file, \"JPEG\")\n \nimlist = os.listdir(path2)\n\nim1 = array(Image.open(path2 + '/'+ imlist[0])) # open one image to get size\nm,n = im1.shape[0:2] # get the size of the images\nimnbr = len(imlist) # get the number of images",
"1513\n"
],
[
"# create matrix to store all flattened images\nimmatrix = array([array(Image.open(path2 + '/' + im2)).flatten()\n for im2 in imlist],'f')\n# Create array of labels and go through image name to dtermine class\nlabel=np.ones((num_samples,),dtype = int)\nindex = 0\nfor im in imlist:\n if 'doge' in im:\n label[index] = 1\n index += 1\n else:\n label[index] = 0\n index += 1\n# Shuffle data as to reduce data skew\ndata,Label = shuffle(immatrix,label, random_state=2)\ntrain_data = [data,Label]\n# Output image to check \nimg=immatrix[32].reshape(img_rows,img_cols)\nplt.imshow(img)\nplt.imshow(img,cmap='gray')\nplt.title('Class '+ str(label[32]))\nprint(train_data[0].shape)\nprint(train_data[1].shape)",
"(1513, 22500)\n(1513,)\n"
],
[
"# number of output classes\nnb_classes = 2\n# Organize data into sample and label\n(X, y) = (train_data[0],train_data[1])",
"_____no_output_____"
],
[
"# Split X and y into training and testing sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=4)\n\nX_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)\nX_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)\n\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\n\nX_train /= 255\nX_test /= 255\n\nprint('X_train shape:', X_train.shape)\nprint(X_train.shape[0], 'train samples')\nprint(X_test.shape[0], 'test samples')\n",
"('X_train shape:', (1210, 150, 150, 1))\n(1210, 'train samples')\n(303, 'test samples')\n"
],
[
"# convert class vectors to binary class matrices\nY_train = np_utils.to_categorical(y_train, nb_classes)\nY_test = np_utils.to_categorical(y_test, nb_classes)\n# Output to see if working\nimg = X_train[10].reshape(img_rows,img_cols)\nplt.imshow(img)\nplt.imshow(img,cmap='gray')",
"_____no_output_____"
],
[
"model = Sequential()\n# Loosely based on Alexnet with stacked conv. layers\nmodel.add(Conv2D(32, (3, 3), input_shape=(img_rows, img_cols,1)))\nmodel.add(Activation('relu'))\nmodel.add(Conv2D(32, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(4,4)))\n\nmodel.add(Conv2D(64,(3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(Conv2D(64, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\n# Flatten before inputing into NN\nmodel.add(Flatten())\n# Fully connected layers\nmodel.add(Dense(512))\nmodel.add(Activation('relu'))\nmodel.add(Dense(256))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(2))\n\nmodel.add(Activation('softmax'))\n\nmodel.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['accuracy'])\nmodel.summary()",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_9 (Conv2D) (None, 148, 148, 32) 320 \n_________________________________________________________________\nactivation_15 (Activation) (None, 148, 148, 32) 0 \n_________________________________________________________________\nconv2d_10 (Conv2D) (None, 146, 146, 32) 9248 \n_________________________________________________________________\nactivation_16 (Activation) (None, 146, 146, 32) 0 \n_________________________________________________________________\nmax_pooling2d_5 (MaxPooling2 (None, 36, 36, 32) 0 \n_________________________________________________________________\nconv2d_11 (Conv2D) (None, 34, 34, 64) 18496 \n_________________________________________________________________\nactivation_17 (Activation) (None, 34, 34, 64) 0 \n_________________________________________________________________\nconv2d_12 (Conv2D) (None, 32, 32, 64) 36928 \n_________________________________________________________________\nactivation_18 (Activation) (None, 32, 32, 64) 0 \n_________________________________________________________________\nmax_pooling2d_6 (MaxPooling2 (None, 16, 16, 64) 0 \n_________________________________________________________________\nflatten_3 (Flatten) (None, 16384) 0 \n_________________________________________________________________\ndense_7 (Dense) (None, 512) 8389120 \n_________________________________________________________________\nactivation_19 (Activation) (None, 512) 0 \n_________________________________________________________________\ndense_8 (Dense) (None, 256) 131328 \n_________________________________________________________________\nactivation_20 (Activation) (None, 256) 0 \n_________________________________________________________________\ndropout_3 (Dropout) (None, 256) 0 \n_________________________________________________________________\ndense_9 (Dense) (None, 2) 514 \n_________________________________________________________________\nactivation_21 (Activation) (None, 2) 0 \n=================================================================\nTotal params: 8,585,954\nTrainable params: 8,585,954\nNon-trainable params: 0\n_________________________________________________________________\nNone\n"
],
[
"gen = ImageDataGenerator(rotation_range=8, width_shift_range=0.08, shear_range=0.3,\n height_shift_range=0.08, zoom_range=0.08)\n\ntest_gen = ImageDataGenerator()\nbatch = 128\ntrain_generator = gen.flow(X_train, Y_train, batch_size=batch)\ntest_generator = test_gen.flow(X_test, Y_test, batch_size=batch)",
"_____no_output_____"
],
[
"model.fit_generator(train_generator, steps_per_epoch=X_train.shape[0]//batch, epochs=100, \n validation_data=test_generator, validation_steps=X_test.shape[0]//batch)",
"Epoch 1/100\n9/9 [==============================] - 2s 218ms/step - loss: 0.6740 - acc: 0.6257 - val_loss: 0.6286 - val_acc: 0.6914\nEpoch 2/100\n9/9 [==============================] - 1s 157ms/step - loss: 0.6526 - acc: 0.6596 - val_loss: 0.6239 - val_acc: 0.6914\nEpoch 3/100\n9/9 [==============================] - 2s 172ms/step - loss: 0.6316 - acc: 0.6737 - val_loss: 0.6080 - val_acc: 0.6914\nEpoch 4/100\n9/9 [==============================] - 2s 181ms/step - loss: 0.6005 - acc: 0.6892 - val_loss: 0.6170 - val_acc: 0.6641\nEpoch 5/100\n9/9 [==============================] - 1s 163ms/step - loss: 0.5821 - acc: 0.7196 - val_loss: 0.5699 - val_acc: 0.7227\nEpoch 6/100\n9/9 [==============================] - 2s 174ms/step - loss: 0.5886 - acc: 0.7038 - val_loss: 0.5450 - val_acc: 0.7344\nEpoch 7/100\n9/9 [==============================] - 2s 172ms/step - loss: 0.5451 - acc: 0.7202 - val_loss: 0.5285 - val_acc: 0.7383\nEpoch 8/100\n9/9 [==============================] - 2s 174ms/step - loss: 0.5357 - acc: 0.7307 - val_loss: 0.5191 - val_acc: 0.7383\nEpoch 9/100\n9/9 [==============================] - 2s 173ms/step - loss: 0.5313 - acc: 0.7363 - val_loss: 0.4883 - val_acc: 0.7617\nEpoch 10/100\n9/9 [==============================] - 1s 156ms/step - loss: 0.5025 - acc: 0.7570 - val_loss: 0.4894 - val_acc: 0.7617\nEpoch 11/100\n9/9 [==============================] - 1s 158ms/step - loss: 0.4854 - acc: 0.7664 - val_loss: 0.4979 - val_acc: 0.7695\nEpoch 12/100\n9/9 [==============================] - 1s 158ms/step - loss: 0.4717 - acc: 0.7748 - val_loss: 0.4641 - val_acc: 0.7969\nEpoch 13/100\n9/9 [==============================] - 2s 169ms/step - loss: 0.4504 - acc: 0.7856 - val_loss: 0.4607 - val_acc: 0.8086\nEpoch 14/100\n9/9 [==============================] - 1s 165ms/step - loss: 0.4810 - acc: 0.7782 - val_loss: 0.4499 - val_acc: 0.7812\nEpoch 15/100\n9/9 [==============================] - 2s 173ms/step - loss: 0.4359 - acc: 0.7932 - val_loss: 0.4640 - val_acc: 0.7852\nEpoch 16/100\n9/9 [==============================] - 2s 182ms/step - loss: 0.4043 - acc: 0.8255 - val_loss: 0.4685 - val_acc: 0.8086\nEpoch 17/100\n9/9 [==============================] - 2s 173ms/step - loss: 0.3995 - acc: 0.8131 - val_loss: 0.4144 - val_acc: 0.8359\nEpoch 18/100\n9/9 [==============================] - 1s 162ms/step - loss: 0.3801 - acc: 0.8300 - val_loss: 0.4040 - val_acc: 0.8320\nEpoch 19/100\n9/9 [==============================] - 1s 163ms/step - loss: 0.3559 - acc: 0.8555 - val_loss: 0.4088 - val_acc: 0.8516\nEpoch 20/100\n9/9 [==============================] - 1s 159ms/step - loss: 0.3526 - acc: 0.8437 - val_loss: 0.3771 - val_acc: 0.8594\nEpoch 21/100\n9/9 [==============================] - 1s 157ms/step - loss: 0.3137 - acc: 0.8800 - val_loss: 0.3815 - val_acc: 0.8633\nEpoch 22/100\n9/9 [==============================] - 1s 165ms/step - loss: 0.3163 - acc: 0.8727 - val_loss: 0.3473 - val_acc: 0.8672\nEpoch 23/100\n9/9 [==============================] - 1s 163ms/step - loss: 0.2920 - acc: 0.8806 - val_loss: 0.3868 - val_acc: 0.8477\nEpoch 24/100\n9/9 [==============================] - 2s 182ms/step - loss: 0.2937 - acc: 0.8767 - val_loss: 0.4180 - val_acc: 0.8594\nEpoch 25/100\n9/9 [==============================] - 1s 161ms/step - loss: 0.3000 - acc: 0.8797 - val_loss: 0.4017 - val_acc: 0.8633\nEpoch 26/100\n9/9 [==============================] - 2s 174ms/step - loss: 0.3288 - acc: 0.8612 - val_loss: 0.4417 - val_acc: 0.8125\nEpoch 27/100\n9/9 [==============================] - 1s 163ms/step - loss: 
0.2673 - acc: 0.8926 - val_loss: 0.4744 - val_acc: 0.8633\nEpoch 28/100\n9/9 [==============================] - 1s 161ms/step - loss: 0.2737 - acc: 0.8868 - val_loss: 0.4650 - val_acc: 0.8594\nEpoch 29/100\n9/9 [==============================] - 1s 162ms/step - loss: 0.2600 - acc: 0.9000 - val_loss: 0.4018 - val_acc: 0.8555\nEpoch 30/100\n9/9 [==============================] - 1s 157ms/step - loss: 0.2557 - acc: 0.8983 - val_loss: 0.4588 - val_acc: 0.8047\nEpoch 31/100\n9/9 [==============================] - 1s 158ms/step - loss: 0.2513 - acc: 0.8949 - val_loss: 0.4866 - val_acc: 0.8438\nEpoch 32/100\n9/9 [==============================] - 1s 157ms/step - loss: 0.2410 - acc: 0.9051 - val_loss: 0.3835 - val_acc: 0.8672\nEpoch 33/100\n9/9 [==============================] - 1s 161ms/step - loss: 0.2001 - acc: 0.9204 - val_loss: 0.4222 - val_acc: 0.8438\nEpoch 34/100\n9/9 [==============================] - 2s 169ms/step - loss: 0.2028 - acc: 0.9149 - val_loss: 0.4375 - val_acc: 0.8711\nEpoch 35/100\n9/9 [==============================] - 1s 155ms/step - loss: 0.1890 - acc: 0.9246 - val_loss: 0.4523 - val_acc: 0.8359\nEpoch 36/100\n9/9 [==============================] - 2s 172ms/step - loss: 0.1638 - acc: 0.9334 - val_loss: 0.4806 - val_acc: 0.8555\nEpoch 37/100\n9/9 [==============================] - 2s 180ms/step - loss: 0.1769 - acc: 0.9253 - val_loss: 0.4116 - val_acc: 0.8438\nEpoch 38/100\n9/9 [==============================] - 1s 162ms/step - loss: 0.1581 - acc: 0.9352 - val_loss: 0.4975 - val_acc: 0.8594\nEpoch 39/100\n9/9 [==============================] - 2s 174ms/step - loss: 0.1577 - acc: 0.9385 - val_loss: 0.4710 - val_acc: 0.8516\nEpoch 40/100\n9/9 [==============================] - 1s 158ms/step - loss: 0.1362 - acc: 0.9491 - val_loss: 0.4999 - val_acc: 0.8438\nEpoch 41/100\n9/9 [==============================] - 1s 159ms/step - loss: 0.1219 - acc: 0.9560 - val_loss: 0.5848 - val_acc: 0.8672\nEpoch 42/100\n9/9 [==============================] - 1s 165ms/step - loss: 0.1152 - acc: 0.9543 - val_loss: 0.5695 - val_acc: 0.8398\nEpoch 43/100\n9/9 [==============================] - 1s 162ms/step - loss: 0.1183 - acc: 0.9535 - val_loss: 0.6022 - val_acc: 0.8594\nEpoch 44/100\n9/9 [==============================] - 2s 173ms/step - loss: 0.1245 - acc: 0.9528 - val_loss: 0.5793 - val_acc: 0.8555\nEpoch 45/100\n9/9 [==============================] - 2s 175ms/step - loss: 0.1026 - acc: 0.9604 - val_loss: 0.6476 - val_acc: 0.8398\nEpoch 46/100\n9/9 [==============================] - 2s 171ms/step - loss: 0.1021 - acc: 0.9569 - val_loss: 0.6277 - val_acc: 0.8672\nEpoch 47/100\n9/9 [==============================] - 2s 171ms/step - loss: 0.1156 - acc: 0.9557 - val_loss: 0.6161 - val_acc: 0.8203\nEpoch 48/100\n9/9 [==============================] - 2s 173ms/step - loss: 0.1104 - acc: 0.9624 - val_loss: 0.5877 - val_acc: 0.8633\nEpoch 49/100\n9/9 [==============================] - 2s 172ms/step - loss: 0.0930 - acc: 0.9581 - val_loss: 0.4931 - val_acc: 0.8516\nEpoch 50/100\n9/9 [==============================] - 1s 158ms/step - loss: 0.1076 - acc: 0.9589 - val_loss: 0.5745 - val_acc: 0.8555\nEpoch 51/100\n9/9 [==============================] - 1s 157ms/step - loss: 0.0746 - acc: 0.9710 - val_loss: 0.5201 - val_acc: 0.8555\nEpoch 52/100\n9/9 [==============================] - 1s 165ms/step - loss: 0.0849 - acc: 0.9738 - val_loss: 0.5260 - val_acc: 0.8555\nEpoch 53/100\n9/9 [==============================] - 1s 166ms/step - loss: 0.0669 - acc: 0.9800 - val_loss: 0.6075 - val_acc: 0.8633\nEpoch 
54/100\n9/9 [==============================] - 1s 161ms/step - loss: 0.0665 - acc: 0.9725 - val_loss: 0.6574 - val_acc: 0.8711\nEpoch 55/100\n9/9 [==============================] - 1s 162ms/step - loss: 0.0702 - acc: 0.9738 - val_loss: 0.5590 - val_acc: 0.8555\nEpoch 56/100\n9/9 [==============================] - 2s 181ms/step - loss: 0.0654 - acc: 0.9766 - val_loss: 0.6375 - val_acc: 0.8633\nEpoch 57/100\n9/9 [==============================] - 1s 165ms/step - loss: 0.0558 - acc: 0.9760 - val_loss: 0.6469 - val_acc: 0.8516\nEpoch 58/100\n9/9 [==============================] - 2s 181ms/step - loss: 0.0843 - acc: 0.9705 - val_loss: 0.6352 - val_acc: 0.8633\nEpoch 59/100\n9/9 [==============================] - 1s 165ms/step - loss: 0.1319 - acc: 0.9533 - val_loss: 0.7170 - val_acc: 0.8281\nEpoch 60/100\n9/9 [==============================] - 1s 158ms/step - loss: 0.1026 - acc: 0.9605 - val_loss: 0.5876 - val_acc: 0.8555\nEpoch 61/100\n9/9 [==============================] - 1s 157ms/step - loss: 0.1115 - acc: 0.9623 - val_loss: 0.5732 - val_acc: 0.8555\nEpoch 62/100\n9/9 [==============================] - 2s 174ms/step - loss: 0.0534 - acc: 0.9835 - val_loss: 0.6888 - val_acc: 0.8633\nEpoch 63/100\n9/9 [==============================] - 1s 164ms/step - loss: 0.0665 - acc: 0.9788 - val_loss: 0.6678 - val_acc: 0.8516\nEpoch 64/100\n9/9 [==============================] - 1s 162ms/step - loss: 0.0473 - acc: 0.9807 - val_loss: 0.7225 - val_acc: 0.8672\nEpoch 65/100\n9/9 [==============================] - 2s 170ms/step - loss: 0.0434 - acc: 0.9870 - val_loss: 0.7810 - val_acc: 0.8398\nEpoch 66/100\n9/9 [==============================] - 2s 173ms/step - loss: 0.0618 - acc: 0.9790 - val_loss: 0.7798 - val_acc: 0.8438\nEpoch 67/100\n9/9 [==============================] - 2s 173ms/step - loss: 0.0695 - acc: 0.9729 - val_loss: 0.7802 - val_acc: 0.8242\nEpoch 68/100\n9/9 [==============================] - 1s 162ms/step - loss: 0.0532 - acc: 0.9823 - val_loss: 0.8218 - val_acc: 0.8281\nEpoch 69/100\n9/9 [==============================] - 1s 155ms/step - loss: 0.0927 - acc: 0.9667 - val_loss: 0.8545 - val_acc: 0.8086\nEpoch 70/100\n9/9 [==============================] - 1s 155ms/step - loss: 0.0884 - acc: 0.9693 - val_loss: 0.7512 - val_acc: 0.8320\nEpoch 71/100\n9/9 [==============================] - 1s 155ms/step - loss: 0.0713 - acc: 0.9720 - val_loss: 0.7554 - val_acc: 0.8242\nEpoch 72/100\n9/9 [==============================] - 1s 163ms/step - loss: 0.0577 - acc: 0.9809 - val_loss: 0.7066 - val_acc: 0.8789\nEpoch 73/100\n9/9 [==============================] - 1s 155ms/step - loss: 0.0761 - acc: 0.9778 - val_loss: 0.7429 - val_acc: 0.8477\nEpoch 74/100\n9/9 [==============================] - 2s 180ms/step - loss: 0.0508 - acc: 0.9835 - val_loss: 0.7909 - val_acc: 0.8516\nEpoch 75/100\n9/9 [==============================] - 1s 166ms/step - loss: 0.0561 - acc: 0.9814 - val_loss: 0.8889 - val_acc: 0.8516\nEpoch 76/100\n9/9 [==============================] - 2s 174ms/step - loss: 0.0715 - acc: 0.9762 - val_loss: 0.7457 - val_acc: 0.8594\nEpoch 77/100\n9/9 [==============================] - 2s 171ms/step - loss: 0.0440 - acc: 0.9878 - val_loss: 0.7462 - val_acc: 0.8555\nEpoch 78/100\n9/9 [==============================] - 2s 172ms/step - loss: 0.0318 - acc: 0.9895 - val_loss: 0.7924 - val_acc: 0.8438\nEpoch 79/100\n9/9 [==============================] - 1s 158ms/step - loss: 0.0285 - acc: 0.9876 - val_loss: 0.8646 - val_acc: 0.8516\nEpoch 80/100\n9/9 [==============================] - 1s 163ms/step - loss: 
0.0385 - acc: 0.9852 - val_loss: 0.9015 - val_acc: 0.8438\nEpoch 81/100\n9/9 [==============================] - 1s 155ms/step - loss: 0.0286 - acc: 0.9938 - val_loss: 1.0718 - val_acc: 0.8516\nEpoch 82/100\n9/9 [==============================] - 1s 165ms/step - loss: 0.0386 - acc: 0.9851 - val_loss: 0.9560 - val_acc: 0.8477\nEpoch 83/100\n9/9 [==============================] - 2s 171ms/step - loss: 0.0466 - acc: 0.9878 - val_loss: 0.8969 - val_acc: 0.8516\nEpoch 84/100\n9/9 [==============================] - 1s 165ms/step - loss: 0.0428 - acc: 0.9893 - val_loss: 0.9254 - val_acc: 0.8555\nEpoch 85/100\n9/9 [==============================] - 1s 163ms/step - loss: 0.0442 - acc: 0.9869 - val_loss: 0.8137 - val_acc: 0.8555\nEpoch 86/100\n9/9 [==============================] - 1s 163ms/step - loss: 0.0199 - acc: 0.9904 - val_loss: 0.8768 - val_acc: 0.8516\nEpoch 87/100\n9/9 [==============================] - 2s 173ms/step - loss: 0.0230 - acc: 0.9930 - val_loss: 0.8971 - val_acc: 0.8516\nEpoch 88/100\n9/9 [==============================] - 2s 181ms/step - loss: 0.0277 - acc: 0.9896 - val_loss: 0.8524 - val_acc: 0.8594\nEpoch 89/100\n9/9 [==============================] - 2s 174ms/step - loss: 0.0268 - acc: 0.9886 - val_loss: 0.9967 - val_acc: 0.8516\nEpoch 90/100\n9/9 [==============================] - 1s 154ms/step - loss: 0.0296 - acc: 0.9921 - val_loss: 0.9092 - val_acc: 0.8438\nEpoch 91/100\n9/9 [==============================] - 1s 155ms/step - loss: 0.0188 - acc: 0.9921 - val_loss: 1.0338 - val_acc: 0.8242\nEpoch 92/100\n9/9 [==============================] - 2s 171ms/step - loss: 0.0192 - acc: 0.9920 - val_loss: 0.9614 - val_acc: 0.8477\nEpoch 93/100\n9/9 [==============================] - 2s 173ms/step - loss: 0.0379 - acc: 0.9913 - val_loss: 0.9819 - val_acc: 0.8438\nEpoch 94/100\n9/9 [==============================] - 1s 154ms/step - loss: 0.0258 - acc: 0.9913 - val_loss: 1.0797 - val_acc: 0.8125\nEpoch 95/100\n9/9 [==============================] - 2s 167ms/step - loss: 0.0411 - acc: 0.9887 - val_loss: 0.8752 - val_acc: 0.8359\nEpoch 96/100\n9/9 [==============================] - 1s 155ms/step - loss: 0.0294 - acc: 0.9921 - val_loss: 0.8882 - val_acc: 0.8555\nEpoch 97/100\n9/9 [==============================] - 1s 149ms/step - loss: 0.0183 - acc: 0.9938 - val_loss: 0.8968 - val_acc: 0.8438\nEpoch 98/100\n9/9 [==============================] - 1s 162ms/step - loss: 0.0324 - acc: 0.9896 - val_loss: 0.9289 - val_acc: 0.8242\nEpoch 99/100\n9/9 [==============================] - 1s 162ms/step - loss: 0.0288 - acc: 0.9860 - val_loss: 0.9161 - val_acc: 0.8438\nEpoch 100/100\n9/9 [==============================] - 1s 155ms/step - loss: 0.0280 - acc: 0.9894 - val_loss: 0.8857 - val_acc: 0.8594\n"
],
[
"path3 = './images/test'\npath4 = './data/test'\n\nlistingT = os.listdir(path3)\nnum_samplesT = size(listingT)\nprint(num_samplesT)\n\nfor file in listingT:\n imT = Image.open(path3 + '/' + file) \n imgT = imT.resize((img_rows,img_cols))\n grayT = imgT.convert('L')\n #need to do some more processing here \n grayT.save(path4 +'/' + file, \"JPEG\")\n \nimlistT = os.listdir(path4)\n\nimT = array(Image.open(path4 + '/'+ imlistT[0])) # open one image to get size\nmT,nT = imT.shape[0:2] # get the size of the images\nimnbrT = len(imlistT) # get the number of images",
"24\n"
],
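[
"# Illustrative sketch (assumption, not part of the original run): the\n# resize-and-grayscale step above wrapped as a reusable helper. It relies on\n# the os and PIL Image imports already used in the cell above.\ndef preprocess_images(src, dst, size):\n    for name in os.listdir(src):\n        img = Image.open(os.path.join(src, name)).resize(size).convert('L')\n        img.save(os.path.join(dst, name), 'JPEG')\n\n# Example call: preprocess_images(path3, path4, (img_rows, img_cols))",
"_____no_output_____"
],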
[
"# create matrix to store all flattened images\nimmatrixT = array([array(Image.open(path4 + '/' + imT)).flatten()\n for imT in imlistT],'f')\n# 185 Doge samples, 600 dog samples\nlabelT = np.ones((num_samplesT,),dtype = int)\nindex = 0\nfor imT in imlistT:\n if 'doge' in imT:\n labelT[index] = 1\n index += 1\n else:\n labelT[index] = 0\n index += 1\n#labelT[0:12]=1\n#labelT[13:24]=0\n\ntrain_dataT = [immatrixT,labelT]\n\nimgT=immatrixT[20].reshape(img_rows,img_cols)\nplt.imshow(imgT)\nplt.imshow(imgT,cmap='gray')\nplt.title('Class '+ str(labelT[20]))\nprint(train_dataT[0].shape)\nprint(train_dataT[1].shape)",
"(24, 22500)\n(24,)\n"
],
[
"(XT, yT) = (train_dataT[0],train_dataT[1])\nX_val = XT.reshape(XT.shape[0], img_rows, img_cols, 1)\nY_val = np_utils.to_categorical(yT, nb_classes)\n\nX_val = X_val.astype('float32')\n\nX_val /= 255\n\nprint('X_train shape:', X_val.shape)\nprint(X_val.shape[0], 'train samples')\n\n",
"('X_train shape:', (24, 150, 150, 1))\n(24, 'train samples')\n"
],
[
"score = model.evaluate(X_val, Y_val)\nprint()\nprint('Test accuracy: ', score[1])",
"24/24 [==============================] - 0s 790us/step\n()\n('Test accuracy: ', 0.875)\n"
],
[
"predictions = model.predict_classes(X_val)\n\npredictions = list(predictions)\nactuals = list(Y_val)\n\nsub = pd.DataFrame({'Actual': labelT, 'Predictions': predictions})\nprint(sub)",
" Actual Predictions\n0 1 1\n1 1 1\n2 0 0\n3 1 1\n4 0 0\n5 0 0\n6 1 1\n7 0 0\n8 1 1\n9 0 0\n10 1 1\n11 1 0\n12 0 0\n13 0 0\n14 0 1\n15 0 0\n16 1 1\n17 1 1\n18 1 1\n19 0 0\n20 0 0\n21 0 0\n22 1 1\n23 1 0\n"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a18de2bdd39bc43655fba300409418320b931fb
| 738,622 |
ipynb
|
Jupyter Notebook
|
PEG_analysis/PEG_dmso.ipynb
|
UWPRG/Nance_Enzyme_Encap_MD
|
52d536f7a2bf8b45195f2bc46f36755c792710fd
|
[
"MIT"
] | 1 |
2021-03-15T20:52:01.000Z
|
2021-03-15T20:52:01.000Z
|
PEG_analysis/PEG_dmso.ipynb
|
UWPRG/Nance_Enzyme_Encap_MD
|
52d536f7a2bf8b45195f2bc46f36755c792710fd
|
[
"MIT"
] | null | null | null |
PEG_analysis/PEG_dmso.ipynb
|
UWPRG/Nance_Enzyme_Encap_MD
|
52d536f7a2bf8b45195f2bc46f36755c792710fd
|
[
"MIT"
] | null | null | null | 131.591306 | 83,700 | 0.867825 |
[
[
[
"#import the necessary modules \n%matplotlib inline \nimport numpy as np \nimport matplotlib.pyplot as plt \nimport pandas as pd \nimport scipy\nimport sklearn\nimport itertools \nfrom itertools import cycle \nimport os.path as op\nimport timeit \nimport json\nimport math\n",
"_____no_output_____"
],
[
"import multiprocessing as m_proc\nm_proc.cpu_count()",
"_____no_output_____"
],
[
"# Import MDAnalysis\nimport MDAnalysis as mda\nimport statsmodels as stats\nfrom MDAnalysis.analysis import polymer, distances, rdf\nimport matplotlib.font_manager as font_manager",
"_____no_output_____"
],
[
"from polymer_MD_analysis import pers_length, get_rg_pers_poly, bavg_pers_cnt",
"_____no_output_____"
]
],
[
[
"## PEG/dmso system analysis",
"_____no_output_____"
],
[
"### N = 6 PEG/DMSO ",
"_____no_output_____"
]
],
[
[
"# For the right Rg calculation using MD Analysis, use trajactory without pbc \nn6_peg_dmso = mda.Universe(\"n6peg_dmso/n6pegonly_dmso.pdb\", \"n6peg_dmso/nodmso_n6peg.xtc\")",
"_____no_output_____"
],
[
"n6_peg_dmso.trajectory",
"_____no_output_____"
],
[
"len(n6_peg_dmso.trajectory)",
"_____no_output_____"
],
[
"#Select the polymer heavy atoms \npeg_n6dmso = n6_peg_dmso.select_atoms(\"resname sPEG PEG tPEG and not type H\")",
"_____no_output_____"
],
[
"crv_n6peg_dmso = pers_length(peg_n6dmso,6)\ncrv_n6peg_dmso",
"_____no_output_____"
],
[
"com_bond = np.zeros(shape=(1,18000))\ncount = 0\nfor ts in n6_peg_dmso.trajectory[0:18000]:\n n6_mon1_dmso = n6_peg_dmso.select_atoms(\"resid 1\")\n n6_mon2_dmso = n6_peg_dmso.select_atoms(\"resid 2\")\n oo_len = mda.analysis.distances.distance_array(n6_mon1_dmso.center_of_mass(), n6_mon2_dmso.center_of_mass(), \n box=n6_peg_dmso.trajectory.ts.dimensions)\n com_bond[0, count] = oo_len\n count += 1\n ",
"_____no_output_____"
],
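[
"# Hedged alternative (illustrative, should give the same numbers as the loop\n# above): collect the monomer-monomer centre-of-mass separation for any\n# residue pair without a manual counter. Assumes the `n6_peg_dmso` universe\n# and the MDAnalysis/NumPy imports from earlier cells.\ndef com_separation(universe, resid_a, resid_b, stop):\n    sel_a = universe.select_atoms(\"resid %d\" % resid_a)\n    sel_b = universe.select_atoms(\"resid %d\" % resid_b)\n    out = []\n    for ts in universe.trajectory[:stop]:\n        d = mda.analysis.distances.distance_array(sel_a.center_of_mass(),\n                                                  sel_b.center_of_mass(),\n                                                  box=ts.dimensions)\n        out.append(float(d))\n    return np.array(out)\n\n# Example: com_separation(n6_peg_dmso, 1, 2, 18000).mean() reproduces np.mean(com_bond)",
"_____no_output_____"
],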
[
"com_bond",
"_____no_output_____"
],
[
"lb_avg_pn6 = np.mean(com_bond)\nlb_avg_pn6",
"_____no_output_____"
],
[
"np.std(com_bond)",
"_____no_output_____"
]
],
[
[
"### Radius of Gyration vs. time N = 6 PEG/dmso",
"_____no_output_____"
]
],
[
[
"n6peg_rgens_dmso, cor_n6peg_dmso, N6peg_cos_dmso, rgdmso_n6peg = get_rg_pers_poly(peg_n6dmso, n6_peg_dmso, 0, 18000)",
"_____no_output_____"
],
[
"n6peg_rgens_dmso[0].shape",
"_____no_output_____"
],
[
"cor_n6peg_dmso[3]",
"_____no_output_____"
],
[
"N6peg_cos_dmso",
"_____no_output_____"
],
[
"rgdmso_n6peg",
"_____no_output_____"
],
[
"np.std(n6peg_rgens_dmso)",
"_____no_output_____"
],
[
"trj_len = np.arange(18000)\n\n#trj_len += 1\ntrj_len\n",
"_____no_output_____"
],
[
"plt.figure(figsize=(7,7))\nplt.title(r'PEG Radius of Gyration', fontsize=18, y=1.01)\nplt.xlabel(r'Time [ns]', fontsize=15)\nplt.ylabel(r'$R_{g}$ [nm]', fontsize=15)\nplt.plot(trj_len/100, n6peg_rgens_dmso[0]/10,linewidth=2, color='#1F2E69')\nplt.tick_params(labelsize=14)\nplt.legend(['N = 6 in DMSO'], frameon=False, fontsize=14)\n#plt.text(127, 0.96,r'N = 6 in water', fontsize=18, color='#1F2E69', family='Arial')\nplt.xlim(0,180)\nplt.ylim(0.2,2)",
"_____no_output_____"
]
],
[
[
"#### Correlation values at each arc length for the whole 180 ns trajectory, N = 6 PEG/dmso",
"_____no_output_____"
]
],
[
[
"# x values\nblen_dmso = cor_n6peg_dmso[3]*lb_avg_pn6\n#nt_tt[0] = 0\nblen_dmso",
"_____no_output_____"
],
[
"# Error prop. into natural log std deviation\nmk_n6p_dmso = cor_n6peg_dmso[1]/cor_n6peg_dmso[0]\nmk_n6p_dmso",
"_____no_output_____"
],
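[
"# Why std/mean is the error bar on ln<cos(theta)>: to first order,\n# d(ln x) = dx/x, so std(ln x) ~= std(x)/mean(x). Quick synthetic check\n# (illustrative only, not part of the original analysis):\nrng = np.random.default_rng(0)\nsample = rng.normal(0.8, 0.02, 100000)\nprint(np.std(np.log(sample)), np.std(sample) / np.mean(sample))",
"_____no_output_____"
],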
[
"plt.figure(figsize=(7,7))\nplt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='b', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')",
"_____no_output_____"
],
[
"# All the points give the best fits for N = 6 peg in water\nn6_blkspeg_dmso , n6peg_lpdmso = bavg_pers_cnt(5, peg_n6dmso, n6_peg_dmso, lb_avg_pn6, 5, 3000 , 18000)",
"3000\n6000\nLp [Angstroms]: 16.809450819424047\nError in Lp from fit [Angstroms], 95% CL: 3.3301372387120307\nR2 score: 0.9785260835461235\n6000\n9000\nLp [Angstroms]: 25.078841659093637\nError in Lp from fit [Angstroms], 95% CL: 2.1789494274728463\nR2 score: 0.9953958918000745\n9000\n12000\nLp [Angstroms]: 18.201036466028352\nError in Lp from fit [Angstroms], 95% CL: 3.147792972890552\nR2 score: 0.9832619440217341\n12000\n15000\nLp [Angstroms]: 19.792809366209973\nError in Lp from fit [Angstroms], 95% CL: 2.6950216682579993\nR2 score: 0.9892081129886878\n15000\n18000\nLp [Angstroms]: 14.283992614209334\nError in Lp from fit [Angstroms], 95% CL: 2.7294267323481027\nR2 score: 0.9798081995032031\n"
],
[
"n6_blkspeg_dmso",
"_____no_output_____"
],
[
"n6peg_lpdmso",
"_____no_output_____"
],
[
"n6peg_lpdmso[2]",
"_____no_output_____"
],
[
"np.mean(n6peg_lpdmso[3])",
"_____no_output_____"
],
[
"def line_fit(slope, x):\n return slope*x ",
"_____no_output_____"
],
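[
"# Hedged cross-check (assumption: this is not the original block-averaged\n# workflow): fit the decay ln<cos(theta)> = -s/Lp directly over the whole\n# trajectory with scipy and read off Lp = -1/slope in Angstroms.\nfrom scipy.optimize import curve_fit\npopt, _ = curve_fit(lambda s, slope: slope * s, blen_dmso,\n                    np.log(cor_n6peg_dmso[0]))\nprint('Lp [Angstroms] ~', -1.0 / popt[0])",
"_____no_output_____"
],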
[
"blen_dmso",
"_____no_output_____"
],
[
"gg_n6peg_dmso = line_fit(np.mean(n6peg_lpdmso[2]),blen_dmso)",
"_____no_output_____"
],
[
"gg_n6peg_dmso",
"_____no_output_____"
]
],
[
[
"### Block averaged Radius of gyration and persistence length, N = 6 PEG/DMSO",
"_____no_output_____"
]
],
[
[
"np.mean(n6_blkspeg_dmso[\"Avg persistence length\"])",
"_____no_output_____"
],
[
"np.std(n6_blkspeg_dmso[\"Avg persistence length\"])",
"_____no_output_____"
],
[
"np.mean(n6_blkspeg_dmso[\"Avg Radius of gyration\"])",
"_____no_output_____"
],
[
"np.std(n6_blkspeg_dmso[\"Avg Radius of gyration\"])",
"_____no_output_____"
],
[
"plt.figure(figsize=(7,7))\nplt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.plot(blen_dmso, gg_n6peg_dmso, color='#1F2E69')\nplt.title(r'Ensemble Averaged ln(Cosine $\\theta$) in DMSO', fontsize=15, y=1.01)\nplt.xlabel(r'Bond Length', fontsize=15)\nplt.ylabel(r'ln$\\left< Cos(\\theta)\\right >$', fontsize=15)\n#plt.ylim(-1.9,0)\nfont = font_manager.FontProperties(family='Arial', style='normal', size='14')\nplt.legend([r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\\AA$ ± 3.60 $\\AA$'], loc=3, frameon=0, fontsize=14, prop=font)\nplt.tick_params(labelsize=14)\n#plt.text(0.5, -6.94,r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\\AA$ ± 3.61 $\\AA$', fontsize=15, color='#1F2E69')",
"_____no_output_____"
],
[
"rgpeg_olig_dmso = pd.DataFrame(data=n6_blkspeg_dmso[\"Avg Radius of gyration\"], columns=['$R_{g}$ [Angstrom] N = 6 PEG DMSO'])\nrgpeg_olig_dmso",
"_____no_output_____"
],
[
"pers_pegt_dmso = pd.DataFrame(data=n6_blkspeg_dmso[\"Avg persistence length\"], columns=[r\"$L_{p}$ [Angstrom] N = 6 PEG DMSO \"])\npers_pegt_dmso",
"_____no_output_____"
]
],
[
[
"### N = 8 PEG/DMSO ",
"_____no_output_____"
]
],
[
[
"# For the right Rg calculation using MD Analysis, use trajactory without pbc \nn8_peg_dmso = mda.Universe(\"n8peg_dmso/n8pegonly_dmso.pdb\", \"n8peg_dmso/nodmso_n8peg.xtc\")",
"_____no_output_____"
],
[
"n8_peg_dmso.trajectory",
"_____no_output_____"
],
[
"len(n8_peg_dmso.trajectory)",
"_____no_output_____"
],
[
"#Select the polymer heavy atoms \npeg_n8dmso = n8_peg_dmso.select_atoms(\"resname sPEG PEG tPEG and not type H\")",
"_____no_output_____"
],
[
"crv_n8peg_dmso = pers_length(peg_n8dmso,8)\ncrv_n8peg_dmso",
"_____no_output_____"
],
[
"com_bond_n8dmso = np.zeros(shape=(1,18000))\ncount = 0\nfor ts in n8_peg_dmso.trajectory[0:18000]:\n n8_mon1_dmso = n8_peg_dmso.select_atoms(\"resid 1\")\n n8_mon2_dmso = n8_peg_dmso.select_atoms(\"resid 2\")\n oo_len = mda.analysis.distances.distance_array(n8_mon1_dmso.center_of_mass(), n8_mon2_dmso.center_of_mass(), \n box=n8_peg_dmso.trajectory.ts.dimensions)\n com_bond_n8dmso[0, count] = oo_len\n count += 1\n ",
"_____no_output_____"
],
[
"com_bond",
"_____no_output_____"
],
[
"lb_avg_pn6",
"_____no_output_____"
],
[
"np.std(com_bond)",
"_____no_output_____"
],
[
"np.mean(com_bond_n8dmso)",
"_____no_output_____"
],
[
"np.std(com_bond_n8dmso)",
"_____no_output_____"
]
],
[
[
"### Radius of Gyration vs. time N = 8 PEG/dmso",
"_____no_output_____"
]
],
[
[
"n8peg_rgens_dmso, cor_n8peg_dmso, N8peg_cos_dmso, rgdmso_n8peg = get_rg_pers_poly(peg_n8dmso, n8_peg_dmso, 0, 18000)",
"_____no_output_____"
],
[
"n8peg_rgens_dmso[0].shape",
"_____no_output_____"
],
[
"cor_n8peg_dmso[3]",
"_____no_output_____"
],
[
"N8peg_cos_dmso",
"_____no_output_____"
],
[
"rgdmso_n8peg",
"_____no_output_____"
],
[
"np.std(n8peg_rgens_dmso)",
"_____no_output_____"
],
[
"trj_len = np.arange(18000)\n\n#trj_len += 1\ntrj_len\n",
"_____no_output_____"
],
[
"plt.figure(figsize=(7,7))\nplt.title(r'PEG Radius of Gyration', fontsize=18, y=1.01)\nplt.xlabel(r'Time [ns]', fontsize=15)\nplt.ylabel(r'$R_{g}$ [nm]', fontsize=15)\nplt.plot(trj_len/100, n6peg_rgens_dmso[0]/10,linewidth=2, color='#1F2E69')\nplt.plot(trj_len/100, n8peg_rgens_dmso[0]/10,linewidth=2, color='#4C80ED')\nplt.tick_params(labelsize=14)\nplt.legend(['N = 6 in DMSO','N = 8 in DMSO'], frameon=False, fontsize=14)\n#plt.text(127, 0.96,r'N = 6 in water', fontsize=18, color='#1F2E69', family='Arial')\nplt.xlim(0,180)\nplt.ylim(0.2,2)",
"_____no_output_____"
]
],
[
[
"#### Correlation values at each arc length for the whole 180 ns trajectory, N = 8 PEG/dmso",
"_____no_output_____"
]
],
[
[
"# x values\nblen_n8dmso = cor_n8peg_dmso[3]*lb_avg_pn6\n#nt_tt[0] = 0\nblen_n8dmso",
"_____no_output_____"
],
[
"# Error prop. into natural log std deviation\nmk_n8p_dmso = cor_n8peg_dmso[1]/cor_n8peg_dmso[0]\nmk_n8p_dmso",
"_____no_output_____"
],
[
"plt.figure(figsize=(7,7))\nplt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.errorbar(blen_n8dmso, np.log(cor_n8peg_dmso[0]), yerr=mk_n8p_dmso, color='#4C80ED', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.legend(['N = 6 in DMSO','N = 8 in DMSO'], frameon=False, fontsize=14)",
"_____no_output_____"
],
[
"# All the points give the best fits for N = 6 peg in water\nn8_blkspeg_dmso , n8peg_lpdmso = bavg_pers_cnt(5, peg_n8dmso, n8_peg_dmso, lb_avg_pn6, 5, 3000 , 18000)",
"3000\n6000\nLp [Angstroms]: 24.031715816800098\nError in Lp from fit [Angstroms], 95% CL : 1.3514244964624402\nR2 score: 0.9934164600662859\n6000\n9000\nLp [Angstroms]: 24.51684858662788\nError in Lp from fit [Angstroms], 95% CL : 1.2257993486178242\nR2 score: 0.9946972792177611\n9000\n12000\nLp [Angstroms]: 23.768712419858353\nError in Lp from fit [Angstroms], 95% CL : 1.6733504893296405\nR2 score: 0.9891790841101049\n12000\n15000\nLp [Angstroms]: 18.25658600869716\nError in Lp from fit [Angstroms], 95% CL : 2.0566628314371584\nR2 score: 0.975946409087187\n15000\n18000\nLp [Angstroms]: 20.10864274157849\nError in Lp from fit [Angstroms], 95% CL : 1.9596841593406975\nR2 score: 0.9815079607707535\n"
],
[
"n8_blkspeg_dmso",
"_____no_output_____"
],
[
"n8peg_lpdmso",
"_____no_output_____"
],
[
"n8peg_lpdmso[2]",
"_____no_output_____"
],
[
"np.mean(n8peg_lpdmso[3])",
"_____no_output_____"
],
[
"blen_dmso",
"_____no_output_____"
],
[
"blen_n8dmso",
"_____no_output_____"
],
[
"gg_n8peg_dmso = line_fit(np.mean(n8peg_lpdmso[2]),blen_n8dmso)",
"_____no_output_____"
],
[
"gg_n6peg_n8dmso = line_fit(np.mean(n6peg_lpdmso[2]),blen_n8dmso)",
"_____no_output_____"
],
[
"gg_n8peg_dmso",
"_____no_output_____"
]
],
[
[
"### Block averaged Radius of gyration and persistence length, N = 8 PEG/DMSO",
"_____no_output_____"
]
],
[
[
"np.mean(n8_blkspeg_dmso[\"Avg persistence length\"])",
"_____no_output_____"
],
[
"np.std(n8_blkspeg_dmso[\"Avg persistence length\"])",
"_____no_output_____"
],
[
"np.mean(n8_blkspeg_dmso[\"Avg Radius of gyration\"])",
"_____no_output_____"
],
[
"np.std(n8_blkspeg_dmso[\"Avg Radius of gyration\"])",
"_____no_output_____"
],
[
"plt.figure(figsize=(7,7))\nplt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.errorbar(blen_n8dmso, np.log(cor_n8peg_dmso[0]), yerr=mk_n8p_dmso, color='#4C80ED', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.plot(blen_n8dmso, gg_n6peg_n8dmso, color='#1F2E69')\nplt.plot(blen_n8dmso, gg_n8peg_dmso, color='#4C80ED')\nplt.title(r'Ensemble Averaged ln(Cosine $\\theta$) in DMSO', fontsize=15, y=1.01)\nplt.xlabel(r'Bond Length', fontsize=15)\nplt.ylabel(r'ln$\\left< Cos(\\theta)\\right >$', fontsize=15)\nplt.ylim(-6,1)\nplt.xlim(0,30)\n#plt.ylim(-1.9,0)\nfont = font_manager.FontProperties(family='Arial', style='normal', size='14')\n#plt.legend([r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\\AA$ ± 3.60 $\\AA$'], loc=3, frameon=0, fontsize=14, prop=font)\nplt.tick_params(labelsize=14)\nplt.text(0.5, -5.9,r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\\AA$ ± 3.61 $\\AA$', fontsize=15, color='#1F2E69')\nplt.text(0.5, -5.58,r'$N_{PEG}$ = 8: $L_{p}$ = 22.1 $\\AA$ ± 2.49 $\\AA$', fontsize=15, color='#4C80ED')",
"_____no_output_____"
],
[
"rgpeg_olig_dmso[r\"$R_{g}$ [Angstrom] N = 8 PEG DMSO \"] = n8_blkspeg_dmso[\"Avg Radius of gyration\"]\nrgpeg_olig_dmso",
"_____no_output_____"
],
[
"pers_pegt_dmso[r\"$L_{p}$ [Angstrom] N = 8 PEG DMSO \"] = n8_blkspeg_dmso[\"Avg persistence length\"]\npers_pegt_dmso",
"_____no_output_____"
]
],
[
[
"### N = 10 PEG/DMSO ",
"_____no_output_____"
]
],
[
[
"# For the right Rg calculation using MD Analysis, use trajactory without pbc \nn10_peg_dmso = mda.Universe(\"n10peg_dmso/n10pegonly_dmso.pdb\", \"n10peg_dmso/nodmso_n10peg.xtc\")",
"_____no_output_____"
],
[
"n10_peg_dmso.trajectory",
"_____no_output_____"
],
[
"len(n10_peg_dmso.trajectory)",
"_____no_output_____"
],
[
"#Select the polymer heavy atoms \npeg_n10dmso = n10_peg_dmso.select_atoms(\"resname sPEG PEG tPEG and not type H\")",
"_____no_output_____"
],
[
"crv_n10peg_dmso = pers_length(peg_n10dmso,10)\ncrv_n10peg_dmso",
"_____no_output_____"
],
[
"com_bond_n10dmso = np.zeros(shape=(1,18000))\ncount = 0\nfor ts in n10_peg_dmso.trajectory[0:18000]:\n n10_mon1_dmso = n10_peg_dmso.select_atoms(\"resid 1\")\n n10_mon2_dmso = n10_peg_dmso.select_atoms(\"resid 2\")\n oo_len = mda.analysis.distances.distance_array(n10_mon1_dmso.center_of_mass(), n10_mon2_dmso.center_of_mass(), \n box=n10_peg_dmso.trajectory.ts.dimensions)\n com_bond_n10dmso[0, count] = oo_len\n count += 1\n ",
"_____no_output_____"
],
[
"com_bond",
"_____no_output_____"
],
[
"lb_avg_pn6",
"_____no_output_____"
],
[
"np.std(com_bond)",
"_____no_output_____"
],
[
"np.mean(com_bond_n10dmso)",
"_____no_output_____"
],
[
"np.std(com_bond_n10dmso)",
"_____no_output_____"
]
],
[
[
"### Radius of Gyration vs. time N = 10 PEG/dmso",
"_____no_output_____"
]
],
[
[
"n10peg_rgens_dmso, cor_n10peg_dmso, N10peg_cos_dmso, rgdmso_n10peg = get_rg_pers_poly(peg_n10dmso, n10_peg_dmso, 0, 18000)",
"_____no_output_____"
],
[
"n10peg_rgens_dmso[0].shape",
"_____no_output_____"
],
[
"cor_n10peg_dmso[3]",
"_____no_output_____"
],
[
"N10peg_cos_dmso",
"_____no_output_____"
],
[
"rgdmso_n10peg",
"_____no_output_____"
],
[
"np.std(n10peg_rgens_dmso)",
"_____no_output_____"
],
[
"trj_len = np.arange(18000)\n\n#trj_len += 1\ntrj_len\n",
"_____no_output_____"
],
[
"plt.figure(figsize=(7,7))\nplt.title(r'PEG Radius of Gyration', fontsize=18, y=1.01)\nplt.xlabel(r'Time [ns]', fontsize=15)\nplt.ylabel(r'$R_{g}$ [nm]', fontsize=15)\nplt.plot(trj_len/100, n6peg_rgens_dmso[0]/10,linewidth=2, color='#1F2E69')\nplt.plot(trj_len/100, n8peg_rgens_dmso[0]/10,linewidth=2, color='#4C80ED')\nplt.plot(trj_len/100, n10peg_rgens_dmso[0]/10,linewidth=2, color='#8C52FC')\nplt.tick_params(labelsize=14)\nplt.legend(['N = 6 in DMSO','N = 8 in DMSO','N = 10 in DMSO' ], frameon=False, fontsize=14)\n#plt.text(127, 0.96,r'N = 6 in water', fontsize=18, color='#1F2E69', family='Arial')\nplt.xlim(0,180)\nplt.ylim(0.2,2)",
"_____no_output_____"
]
],
[
[
"#### Correlation values at each arc length for the whole 180 ns trajectory, N = 8 PEG/dmso",
"_____no_output_____"
]
],
[
[
"# x values\nblen_n10dmso = cor_n10peg_dmso[3]*lb_avg_pn6\n#nt_tt[0] = 0\nblen_n10dmso",
"_____no_output_____"
],
[
"# Error prop. into natural log std deviation\nmk_n10p_dmso = cor_n10peg_dmso[1]/cor_n10peg_dmso[0]\nmk_n10p_dmso",
"_____no_output_____"
],
[
"plt.figure(figsize=(7,7))\nplt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.errorbar(blen_n8dmso, np.log(cor_n8peg_dmso[0]), yerr=mk_n8p_dmso, color='#4C80ED', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.errorbar(blen_n10dmso, np.log(cor_n10peg_dmso[0]), yerr=mk_n10p_dmso, color='#8C52FC', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.legend(['N = 6 in DMSO','N = 8 in DMSO','N = 10 in DMSO'], frameon=False, fontsize=14)",
"_____no_output_____"
],
[
"# All the points give the best fits for N = 6 peg in water\nn10_blkspeg_dmso , n10peg_lpdmso = bavg_pers_cnt(5, peg_n10dmso, n10_peg_dmso, lb_avg_pn6, 5, 3000 , 18000)",
"3000\n6000\nLp [Angstroms]: 22.492805070690395\nError in Lp from fit [Angstroms], 95% CL : 0.9369237622243255\nR2 score: 0.9903058695102129\n6000\n9000\nLp [Angstroms]: 17.20932821500442\nError in Lp from fit [Angstroms], 95% CL : 1.3782286452084431\nR2 score: 0.9676288904559768\n9000\n12000\nLp [Angstroms]: 21.63668598824028\nError in Lp from fit [Angstroms], 95% CL : 1.1976854345171484\nR2 score: 0.9835325679873382\n12000\n15000\nLp [Angstroms]: 20.51880811070344\nError in Lp from fit [Angstroms], 95% CL : 1.0029317588699187\nR2 score: 0.9869271194673283\n15000\n18000\nLp [Angstroms]: 24.10767645095798\nError in Lp from fit [Angstroms], 95% CL : 0.8469632408791449\nR2 score: 0.9929865424799854\n"
],
[
"n10_blkspeg_dmso",
"_____no_output_____"
],
[
"n10peg_lpdmso",
"_____no_output_____"
],
[
"n10peg_lpdmso[2]",
"_____no_output_____"
],
[
"np.mean(n10peg_lpdmso[3])",
"_____no_output_____"
],
[
"blen_dmso",
"_____no_output_____"
],
[
"blen_n10dmso",
"_____no_output_____"
],
[
"gg_n10peg_dmso = line_fit(np.mean(n10peg_lpdmso[2]),blen_n10dmso)",
"_____no_output_____"
],
[
"gg_n6peg_n10dmso = line_fit(np.mean(n6peg_lpdmso[2]),blen_n10dmso)",
"_____no_output_____"
],
[
"gg_n8peg_n10dmso = line_fit(np.mean(n8peg_lpdmso[2]),blen_n10dmso)",
"_____no_output_____"
],
[
"gg_n10peg_dmso",
"_____no_output_____"
]
],
[
[
"### Block averaged Radius of gyration and persistence length, N = 10 PEG/DMSO",
"_____no_output_____"
]
],
[
[
"np.mean(n10_blkspeg_dmso[\"Avg persistence length\"])",
"_____no_output_____"
],
[
"np.std(n10_blkspeg_dmso[\"Avg persistence length\"])",
"_____no_output_____"
],
[
"np.mean(n10_blkspeg_dmso[\"Avg Radius of gyration\"])",
"_____no_output_____"
],
[
"np.std(n10_blkspeg_dmso[\"Avg Radius of gyration\"])",
"_____no_output_____"
],
[
"plt.figure(figsize=(7,7))\nplt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.errorbar(blen_n8dmso, np.log(cor_n8peg_dmso[0]), yerr=mk_n8p_dmso, color='#4C80ED', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.errorbar(blen_n10dmso, np.log(cor_n10peg_dmso[0]), yerr=mk_n10p_dmso, color='#8C52FC', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.plot(blen_n10dmso, gg_n6peg_n10dmso, color='#1F2E69')\nplt.plot(blen_n10dmso, gg_n8peg_n10dmso, color='#4C80ED')\nplt.plot(blen_n10dmso, gg_n10peg_dmso, color='#8C52FC')\nplt.title(r'Ensemble Averaged ln(Cosine $\\theta$) in DMSO', fontsize=15, y=1.01)\nplt.xlabel(r'Bond Length', fontsize=15)\nplt.ylabel(r'ln$\\left< Cos(\\theta)\\right >$', fontsize=15)\nplt.ylim(-6,1)\nplt.xlim(0,30)\n#plt.ylim(-1.9,0)\nfont = font_manager.FontProperties(family='Arial', style='normal', size='14')\n#plt.legend([r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\\AA$ ± 3.60 $\\AA$'], loc=3, frameon=0, fontsize=14, prop=font)\nplt.tick_params(labelsize=14)\nplt.text(0.5, -5.9,r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\\AA$ ± 3.61 $\\AA$', fontsize=15, color='#1F2E69')\nplt.text(0.5, -5.58,r'$N_{PEG}$ = 8: $L_{p}$ = 22.1 $\\AA$ ± 2.49 $\\AA$', fontsize=15, color='#4C80ED')\nplt.text(0.5, -5.23,r'$N_{PEG}$ = 10: $L_{p}$ = 21.2 $\\AA$ ± 2.31 $\\AA$', fontsize=15, color='#8C52FC')",
"_____no_output_____"
],
[
"rgpeg_olig_dmso[r\"$R_{g}$ [Angstrom] N = 10 PEG DMSO \"] = n10_blkspeg_dmso[\"Avg Radius of gyration\"]\nrgpeg_olig_dmso",
"_____no_output_____"
],
[
"pers_pegt_dmso[r\"$L_{p}$ [Angstrom] N = 10 PEG DMSO \"] = n10_blkspeg_dmso[\"Avg persistence length\"]\npers_pegt_dmso",
"_____no_output_____"
]
],
[
[
"### N = 20 PEG/DMSO ",
"_____no_output_____"
]
],
[
[
"# For the right Rg calculation using MD Analysis, use trajactory without pbc \nn20_peg_dmso = mda.Universe(\"n20peg_dmso/n20pegonly_dmso.pdb\", \"n20peg_dmso/nodmso_n20peg.xtc\")",
"_____no_output_____"
],
[
"n20_peg_dmso.trajectory",
"_____no_output_____"
],
[
"len(n20_peg_dmso.trajectory)",
"_____no_output_____"
],
[
"#Select the polymer heavy atoms \npeg_n20dmso = n20_peg_dmso.select_atoms(\"resname sPEG PEG tPEG and not type H\")",
"_____no_output_____"
],
[
"crv_n20peg_dmso = pers_length(peg_n20dmso,20)\ncrv_n20peg_dmso",
"_____no_output_____"
],
[
"com_bond_n20dmso = np.zeros(shape=(1,18000))\ncount = 0\nfor ts in n20_peg_dmso.trajectory[0:18000]:\n n20_mon1_dmso = n20_peg_dmso.select_atoms(\"resid 1\")\n n20_mon2_dmso = n20_peg_dmso.select_atoms(\"resid 2\")\n oo_len = mda.analysis.distances.distance_array(n20_mon1_dmso.center_of_mass(), n20_mon2_dmso.center_of_mass(), \n box=n20_peg_dmso.trajectory.ts.dimensions)\n com_bond_n20dmso[0, count] = oo_len\n count += 1\n ",
"_____no_output_____"
],
[
"com_bond",
"_____no_output_____"
],
[
"lb_avg_pn6",
"_____no_output_____"
],
[
"np.std(com_bond)",
"_____no_output_____"
],
[
"np.mean(com_bond_n20dmso)",
"_____no_output_____"
],
[
"np.std(com_bond_n20dmso)",
"_____no_output_____"
]
],
[
[
"### Radius of Gyration vs. time N = 20 PEG/dmso",
"_____no_output_____"
]
],
[
[
"n20peg_rgens_dmso, cor_n20peg_dmso, N20peg_cos_dmso, rgdmso_n20peg = get_rg_pers_poly(peg_n20dmso, n20_peg_dmso, 0, 18000)",
"_____no_output_____"
],
[
"n20peg_rgens_dmso[0].shape",
"_____no_output_____"
],
[
"cor_n20peg_dmso[3]",
"_____no_output_____"
],
[
"N20peg_cos_dmso",
"_____no_output_____"
],
[
"rgdmso_n20peg",
"_____no_output_____"
],
[
"np.std(n20peg_rgens_dmso)",
"_____no_output_____"
],
[
"plt.figure(figsize=(7,7))\nplt.title(r'PEG Radius of Gyration', fontsize=18, y=1.01)\nplt.xlabel(r'Time [ns]', fontsize=15)\nplt.ylabel(r'$R_{g}$ [nm]', fontsize=15)\nplt.plot(trj_len/100, n6peg_rgens_dmso[0]/10,linewidth=2, color='#1F2E69')\nplt.plot(trj_len/100, n8peg_rgens_dmso[0]/10,linewidth=2, color='#4C80ED')\nplt.plot(trj_len/100, n10peg_rgens_dmso[0]/10,linewidth=2, color='#8C52FC')\nplt.plot(trj_len/100, n20peg_rgens_dmso[0]/10,linewidth=2, color='#8B7F47')\nplt.tick_params(labelsize=14)\nplt.legend(['N = 6 in DMSO','N = 8 in DMSO','N = 10 in DMSO','N = 20 in DMSO'], frameon=False, fontsize=14)\n#plt.text(127, 0.96,r'N = 6 in water', fontsize=18, color='#1F2E69', family='Arial')\nplt.xlim(0,180)\nplt.ylim(0.2,2)",
"_____no_output_____"
]
],
[
[
"#### Correlation values at each arc length for the whole 180 ns trajectory, N = 20 PEG/dmso",
"_____no_output_____"
]
],
[
[
"# x values\nblen_n20dmso = cor_n20peg_dmso[3]*lb_avg_pn6\n#nt_tt[0] = 0\nblen_n20dmso",
"_____no_output_____"
],
[
"# Error prop. into natural log std deviation\nmk_n20p_dmso = cor_n20peg_dmso[1]/cor_n20peg_dmso[0]\nmk_n20p_dmso",
"_____no_output_____"
],
[
"plt.figure(figsize=(7,7))\nplt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.errorbar(blen_n8dmso, np.log(cor_n8peg_dmso[0]), yerr=mk_n8p_dmso, color='#4C80ED', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.errorbar(blen_n10dmso, np.log(cor_n10peg_dmso[0]), yerr=mk_n10p_dmso, color='#8C52FC', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.errorbar(blen_n20dmso, np.log(cor_n20peg_dmso[0]), yerr=mk_n20p_dmso, color='#8B7F47', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.legend(['N = 6 in DMSO','N = 8 in DMSO','N = 10 in DMSO','N = 20 in DMSO'], frameon=False, fontsize=14)",
"_____no_output_____"
],
[
"# All the points give the best fits for N = 6 peg in water\nn20_blkspeg_dmso , n20peg_lpdmso = bavg_pers_cnt(5, peg_n20dmso, n20_peg_dmso, lb_avg_pn6, 5, 3000 , 18000)",
"3000\n6000\nLp [Angstroms]: 24.360254268278428\nError in Lp from fit [Angstroms], 95% CL : 0.21305591430778867\nR2 score: 0.9931032834573079\n6000\n9000\nLp [Angstroms]: 21.492769238261108\nError in Lp from fit [Angstroms], 95% CL : 0.2598127990634099\nR2 score: 0.9874401568877104\n9000\n12000\nLp [Angstroms]: 21.707005124486685\nError in Lp from fit [Angstroms], 95% CL : 0.25433781334677297\nR2 score: 0.9881551815611648\n12000\n15000\nLp [Angstroms]: 24.256415557196032\nError in Lp from fit [Angstroms], 95% CL : 0.2429912354058217\nR2 score: 0.9911607410208927\n15000\n18000\nLp [Angstroms]: 22.831261064594724\nError in Lp from fit [Angstroms], 95% CL : 0.23764917940632213\nR2 score: 0.990435684398902\n"
],
[
"n20_blkspeg_dmso",
"_____no_output_____"
],
[
"n20peg_lpdmso",
"_____no_output_____"
],
[
"n20peg_lpdmso[2]",
"_____no_output_____"
],
[
"np.mean(n20peg_lpdmso[3])",
"_____no_output_____"
],
[
"blen_dmso",
"_____no_output_____"
],
[
"blen_n20dmso",
"_____no_output_____"
],
[
"gg_n20peg_dmso = line_fit(np.mean(n20peg_lpdmso[2]),blen_n20dmso)",
"_____no_output_____"
],
[
"gg_n6peg_n20dmso = line_fit(np.mean(n6peg_lpdmso[2]),blen_n20dmso)",
"_____no_output_____"
],
[
"gg_n8peg_n20dmso = line_fit(np.mean(n8peg_lpdmso[2]),blen_n20dmso)",
"_____no_output_____"
],
[
"gg_n10peg_n20dmso = line_fit(np.mean(n10peg_lpdmso[2]),blen_n20dmso)",
"_____no_output_____"
],
[
"gg_n20peg_dmso",
"_____no_output_____"
]
],
[
[
"### Block averaged Radius of gyration and persistence length, N = 20 PEG/DMSO",
"_____no_output_____"
]
],
[
[
"np.mean(n20_blkspeg_dmso[\"Avg persistence length\"])",
"_____no_output_____"
],
[
"np.std(n20_blkspeg_dmso[\"Avg persistence length\"])",
"_____no_output_____"
],
[
"np.mean(n20_blkspeg_dmso[\"Avg Radius of gyration\"])",
"_____no_output_____"
],
[
"np.std(n20_blkspeg_dmso[\"Avg Radius of gyration\"])",
"_____no_output_____"
],
[
"plt.figure(figsize=(7,7))\nplt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.errorbar(blen_n8dmso, np.log(cor_n8peg_dmso[0]), yerr=mk_n8p_dmso, color='#4C80ED', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.errorbar(blen_n10dmso, np.log(cor_n10peg_dmso[0]), yerr=mk_n10p_dmso, color='#8C52FC', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.errorbar(blen_n20dmso, np.log(cor_n20peg_dmso[0]), yerr=mk_n20p_dmso, color='#8B7F47', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.plot(blen_n20dmso[:15], gg_n6peg_n20dmso[:15], color='#1F2E69')\nplt.plot(blen_n20dmso[:15], gg_n8peg_n20dmso[:15], color='#4C80ED')\nplt.plot(blen_n20dmso[:15], gg_n10peg_n20dmso[:15], color='#8C52FC')\nplt.plot(blen_n20dmso[:15], gg_n20peg_dmso[:15], color='#8B7F47')\nplt.title(r'Ensemble Averaged ln(Cosine $\\theta$) in DMSO', fontsize=15, y=1.01)\nplt.xlabel(r'Bond Length', fontsize=15)\nplt.ylabel(r'ln$\\left< Cos(\\theta)\\right >$', fontsize=15)\nplt.ylim(-6,1)\nplt.xlim(0,70)\n#plt.ylim(-1.9,0)\nfont = font_manager.FontProperties(family='Arial', style='normal', size='14')\n#plt.legend([r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\\AA$ ± 3.60 $\\AA$'], loc=3, frameon=0, fontsize=14, prop=font)\nplt.tick_params(labelsize=14)\nplt.text(0.5, -5.9,r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\\AA$ ± 3.61 $\\AA$', fontsize=15, color='#1F2E69')\nplt.text(0.5, -5.58,r'$N_{PEG}$ = 8: $L_{p}$ = 22.1 $\\AA$ ± 2.49 $\\AA$', fontsize=15, color='#4C80ED')\nplt.text(0.5, -5.23,r'$N_{PEG}$ = 10: $L_{p}$ = 21.2 $\\AA$ ± 2.31 $\\AA$', fontsize=15, color='#8C52FC')\nplt.text(0.5, -4.90,r'$N_{PEG}$ = 20: $L_{p}$ = 22.9 $\\AA$ ± 1.21 $\\AA$', fontsize=15, color='#8B7F47')",
"_____no_output_____"
],
[
"rgpeg_olig_dmso[r\"$R_{g}$ [Angstrom] N = 20 PEG DMSO \"] = n20_blkspeg_dmso[\"Avg Radius of gyration\"]\nrgpeg_olig_dmso",
"_____no_output_____"
],
[
"pers_pegt_dmso[r\"$L_{p}$ [Angstrom] N = 20 PEG DMSO \"] = n20_blkspeg_dmso[\"Avg persistence length\"]\npers_pegt_dmso",
"_____no_output_____"
]
],
[
[
"### N = 30 PEG/DMSO ",
"_____no_output_____"
]
],
[
[
"# For the right Rg calculation using MD Analysis, use trajactory without pbc \nn30_peg_dmso = mda.Universe(\"n30peg_dmso/n30pegonly_dmso.pdb\", \"n30peg_dmso/nodmso_n30peg.xtc\")",
"_____no_output_____"
],
[
"n30_peg_dmso.trajectory",
"_____no_output_____"
],
[
"len(n30_peg_dmso.trajectory)",
"_____no_output_____"
],
[
"#Select the polymer heavy atoms \npeg_n30dmso = n30_peg_dmso.select_atoms(\"resname sPEG PEG tPEG and not type H\")",
"_____no_output_____"
],
[
"crv_n30peg_dmso = pers_length(peg_n30dmso,30)\ncrv_n30peg_dmso",
"_____no_output_____"
],
[
"com_bond_n30dmso = np.zeros(shape=(1,18000))\ncount = 0\nfor ts in n30_peg_dmso.trajectory[0:18000]:\n n30_mon1_dmso = n30_peg_dmso.select_atoms(\"resid 1\")\n n30_mon2_dmso = n30_peg_dmso.select_atoms(\"resid 2\")\n oo_len = mda.analysis.distances.distance_array(n30_mon1_dmso.center_of_mass(), n30_mon2_dmso.center_of_mass(), \n box=n30_peg_dmso.trajectory.ts.dimensions)\n com_bond_n30dmso[0, count] = oo_len\n count += 1\n ",
"_____no_output_____"
],
[
"com_bond",
"_____no_output_____"
],
[
"lb_avg_pn6",
"_____no_output_____"
],
[
"np.std(com_bond)",
"_____no_output_____"
],
[
"np.mean(com_bond_n30dmso)",
"_____no_output_____"
],
[
"np.std(com_bond_n30dmso)",
"_____no_output_____"
]
],
[
[
"### Radius of Gyration vs. time N = 30 PEG/dmso",
"_____no_output_____"
]
],
[
[
"n30peg_rgens_dmso, cor_n30peg_dmso, N30peg_cos_dmso, rgdmso_n30peg = get_rg_pers_poly(peg_n30dmso, n30_peg_dmso, 0, 18000)",
"_____no_output_____"
],
[
"n30peg_rgens_dmso[0].shape",
"_____no_output_____"
],
[
"cor_n30peg_dmso[3]",
"_____no_output_____"
],
[
"N30peg_cos_dmso",
"_____no_output_____"
],
[
"rgdmso_n30peg",
"_____no_output_____"
],
[
"np.std(n30peg_rgens_dmso)",
"_____no_output_____"
],
[
"plt.figure(figsize=(7,7))\nplt.title(r'PEG Radius of Gyration', fontsize=18, y=1.01)\nplt.xlabel(r'Time [ns]', fontsize=15)\nplt.ylabel(r'$R_{g}$ [nm]', fontsize=15)\nplt.plot(trj_len/100, n6peg_rgens_dmso[0]/10,linewidth=2, color='#1F2E69')\nplt.plot(trj_len/100, n8peg_rgens_dmso[0]/10,linewidth=2, color='#4C80ED')\nplt.plot(trj_len/100, n10peg_rgens_dmso[0]/10,linewidth=2, color='#8C52FC')\nplt.plot(trj_len/100, n20peg_rgens_dmso[0]/10,linewidth=2, color='#8B7F47')\nplt.plot(trj_len/100, n30peg_rgens_dmso[0]/10,linewidth=2, color='#63ACBE')\nplt.tick_params(labelsize=14)\nplt.legend(['N = 6 in DMSO','N = 8 in DMSO','N = 10 in DMSO','N = 20 in DMSO','N = 30 in DMSO'], frameon=False, fontsize=14)\n#plt.text(127, 0.96,r'N = 6 in water', fontsize=18, color='#1F2E69', family='Arial')\nplt.xlim(0,180)\nplt.ylim(0.2,3)",
"_____no_output_____"
]
],
[
[
"#### Correlation values at each arc length for the whole 180 ns trajectory, N = 30 PEG/dmso",
"_____no_output_____"
]
],
[
[
"# x values\nblen_n30dmso = cor_n30peg_dmso[3]*lb_avg_pn6\n#nt_tt[0] = 0\nblen_n30dmso",
"_____no_output_____"
],
[
"# Error prop. into natural log std deviation\nmk_n30p_dmso = cor_n30peg_dmso[1]/cor_n30peg_dmso[0]\nmk_n30p_dmso",
"_____no_output_____"
],
[
"plt.figure(figsize=(7,7))\nplt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.errorbar(blen_n8dmso, np.log(cor_n8peg_dmso[0]), yerr=mk_n8p_dmso, color='#4C80ED', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.errorbar(blen_n10dmso, np.log(cor_n10peg_dmso[0]), yerr=mk_n10p_dmso, color='#8C52FC', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.errorbar(blen_n20dmso, np.log(cor_n20peg_dmso[0]), yerr=mk_n20p_dmso, color='#8B7F47', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.errorbar(blen_n30dmso, np.log(cor_n30peg_dmso[0]), yerr=mk_n30p_dmso, color='#63ACBE', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.legend(['N = 6 in DMSO','N = 8 in DMSO','N = 10 in DMSO','N = 20 in DMSO','N = 30 in DMSO'], frameon=False, fontsize=14)",
"_____no_output_____"
],
[
"# All the points give the best fits for N = 6 peg in water\nn30_blkspeg_dmso , n30peg_lpdmso = bavg_pers_cnt(5, peg_n30dmso, n30_peg_dmso, lb_avg_pn6, 5, 3000 , 18000)",
"3000\n6000\nLp [Angstroms]: 23.86345447339396\nError in Lp from fit [Angstroms], 95% CL : 0.1166535192311226\nR2 score: 0.9912928583708327\n6000\n9000\nLp [Angstroms]: 23.753121899827295\nError in Lp from fit [Angstroms], 95% CL : 0.11586847222566812\nR2 score: 0.991309747083878\n9000\n12000\nLp [Angstroms]: 25.07552576394827\nError in Lp from fit [Angstroms], 95% CL : 0.10471197544385624\nR2 score: 0.9934684236348461\n12000\n15000\nLp [Angstroms]: 25.95801812439543\nError in Lp from fit [Angstroms], 95% CL : 0.0997631986981003\nR2 score: 0.9944366777128748\n15000\n18000\nLp [Angstroms]: 22.297741821168206\nError in Lp from fit [Angstroms], 95% CL : 0.12358115359164186\nR2 score: 0.9889928125471411\n"
],
[
"n30_blkspeg_dmso",
"_____no_output_____"
],
[
"n30peg_lpdmso",
"_____no_output_____"
],
[
"n30peg_lpdmso[2]",
"_____no_output_____"
],
[
"np.mean(n30peg_lpdmso[3])",
"_____no_output_____"
],
[
"blen_dmso",
"_____no_output_____"
],
[
"blen_n30dmso",
"_____no_output_____"
],
[
"gg_n30peg_dmso = line_fit(np.mean(n30peg_lpdmso[2]),blen_n30dmso)",
"_____no_output_____"
],
[
"gg_n6peg_n30dmso = line_fit(np.mean(n6peg_lpdmso[2]),blen_n30dmso)",
"_____no_output_____"
],
[
"gg_n8peg_n30dmso = line_fit(np.mean(n8peg_lpdmso[2]),blen_n30dmso)",
"_____no_output_____"
],
[
"gg_n10peg_n30dmso = line_fit(np.mean(n10peg_lpdmso[2]),blen_n30dmso)",
"_____no_output_____"
],
[
"gg_n20peg_n30dmso = line_fit(np.mean(n20peg_lpdmso[2]),blen_n30dmso)",
"_____no_output_____"
],
[
"gg_n30peg_dmso",
"_____no_output_____"
]
],
[
[
"### Block averaged Radius of gyration and persistence length, N = 30 PEG/DMSO",
"_____no_output_____"
]
],
[
[
"np.mean(n30_blkspeg_dmso[\"Avg persistence length\"])",
"_____no_output_____"
],
[
"np.std(n30_blkspeg_dmso[\"Avg persistence length\"])",
"_____no_output_____"
],
[
"np.mean(n30_blkspeg_dmso[\"Avg Radius of gyration\"])",
"_____no_output_____"
],
[
"np.std(n30_blkspeg_dmso[\"Avg Radius of gyration\"])",
"_____no_output_____"
],
[
"plt.figure(figsize=(7,7))\nplt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.errorbar(blen_n8dmso, np.log(cor_n8peg_dmso[0]), yerr=mk_n8p_dmso, color='#4C80ED', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.errorbar(blen_n10dmso, np.log(cor_n10peg_dmso[0]), yerr=mk_n10p_dmso, color='#8C52FC', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.errorbar(blen_n20dmso, np.log(cor_n20peg_dmso[0]), yerr=mk_n20p_dmso, color='#8B7F47', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.errorbar(blen_n30dmso, np.log(cor_n30peg_dmso[0]), yerr=mk_n30p_dmso, color='#63ACBE', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.plot(blen_n20dmso[:15], gg_n6peg_n30dmso[:15], color='#1F2E69')\nplt.plot(blen_n20dmso[:15], gg_n8peg_n30dmso[:15], color='#4C80ED')\nplt.plot(blen_n20dmso[:15], gg_n10peg_n30dmso[:15], color='#8C52FC')\nplt.plot(blen_n20dmso[:15], gg_n20peg_n30dmso[:15], color='#8B7F47')\nplt.plot(blen_n30dmso[:15], gg_n30peg_dmso[:15], color='#63ACBE')\nplt.title(r'Ensemble Averaged ln(Cosine $\\theta$) in DMSO', fontsize=15, y=1.01)\nplt.xlabel(r'Bond Length', fontsize=15)\nplt.ylabel(r'ln$\\left< Cos(\\theta)\\right >$', fontsize=15)\nplt.ylim(-6,1)\nplt.xlim(0,90)\n#plt.ylim(-1.9,0)\nfont = font_manager.FontProperties(family='Arial', style='normal', size='14')\n#plt.legend([r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\\AA$ ± 3.60 $\\AA$'], loc=3, frameon=0, fontsize=14, prop=font)\nplt.tick_params(labelsize=14)\nplt.text(0.5, -5.9,r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\\AA$ ± 3.61 $\\AA$', fontsize=15, color='#1F2E69')\nplt.text(0.5, -5.58,r'$N_{PEG}$ = 8: $L_{p}$ = 22.1 $\\AA$ ± 2.49 $\\AA$', fontsize=15, color='#4C80ED')\nplt.text(0.5, -5.23,r'$N_{PEG}$ = 10: $L_{p}$ = 21.2 $\\AA$ ± 2.31 $\\AA$', fontsize=15, color='#8C52FC')\nplt.text(0.5, -4.90,r'$N_{PEG}$ = 20: $L_{p}$ = 22.9 $\\AA$ ± 1.21 $\\AA$', fontsize=15, color='#8B7F47')\nplt.text(0.5, -4.50,r'$N_{PEG}$ = 30: $L_{p}$ = 24.2 $\\AA$ ± 1.25 $\\AA$', fontsize=15, color='#63ACBE')",
"_____no_output_____"
],
[
"rgpeg_olig_dmso[r\"$R_{g}$ [Angstrom] N = 30 PEG DMSO \"] = n30_blkspeg_dmso[\"Avg Radius of gyration\"]\nrgpeg_olig_dmso",
"_____no_output_____"
],
[
"pers_pegt_dmso[r\"$L_{p}$ [Angstrom] N = 30 PEG DMSO \"] = n30_blkspeg_dmso[\"Avg persistence length\"]\npers_pegt_dmso",
"_____no_output_____"
],
[
"rgpeg_olig_dmso.to_pickle(\"PEG_dmso_Rg.pkl\")",
"_____no_output_____"
],
[
"pers_pegt_dmso.to_pickle(\"PEG_dmso_Lp.pkl\")",
"_____no_output_____"
]
],
[
[
"### Fluory Exponent, PEG/DMSO systems ",
"_____no_output_____"
]
],
[
[
"n_peg = np.array([6,8,10,20,30])\nrg_npeg_dmso = np.array([np.mean(n6_blkspeg_dmso[\"Avg Radius of gyration\"])\n ,np.mean(n8_blkspeg_dmso[\"Avg Radius of gyration\"]),np.mean(n10_blkspeg_dmso[\"Avg Radius of gyration\"])\n ,np.mean(n20_blkspeg_dmso[\"Avg Radius of gyration\"]),np.mean(n30_blkspeg_dmso[\"Avg Radius of gyration\"])])",
"_____no_output_____"
],
[
"rg_npeg_dmso",
"_____no_output_____"
],
[
"rgdmso_npeg_std = np.array([np.std(np.log10(n6_blkspeg_dmso[\"Avg Radius of gyration\"]))\n ,np.std(np.log10(n8_blkspeg_dmso[\"Avg Radius of gyration\"]))\n ,np.std(np.log10(n10_blkspeg_dmso[\"Avg Radius of gyration\"]))\n ,np.std(np.log10(n20_blkspeg_dmso[\"Avg Radius of gyration\"]))\n ,np.std(np.log10(n30_blkspeg_dmso[\"Avg Radius of gyration\"]))])\nrgdmso_npeg_std",
"_____no_output_____"
],
[
"n_peg",
"_____no_output_____"
],
[
"np.log10(rg_npeg_dmso)",
"_____no_output_____"
],
[
"np.log10(n_peg)",
"_____no_output_____"
],
[
"# From fitting all points, I get best fit \nfrom sklearn.linear_model import LinearRegression\nmodel_vdmso = LinearRegression(fit_intercept=True)\nmodel_vdmso.fit(np.log10(n_peg).reshape(-1,1), np.log10(rg_npeg_dmso))\n# Slope here is in nanometers\nprint(\"Model slope: \", model_vdmso.coef_[0])\nprint(\"Model intercept:\", model_vdmso.intercept_)",
"Model slope: 0.6330716745864243\nModel intercept: 0.16574815673194487\n"
],
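[
"# Cross-check (illustrative): the same log-log slope via np.polyfit, which\n# should reproduce the sklearn coefficients above.\nslope, intercept = np.polyfit(np.log10(n_peg), np.log10(rg_npeg_dmso), 1)\nprint('slope:', slope, 'intercept:', intercept)",
"_____no_output_____"
],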
[
"gg_dmso = model_vdmso.predict(np.log10(n_peg.reshape(-1,1)))\ngg_dmso",
"_____no_output_____"
],
[
"print(\"Mean Std Error:\", sklearn.metrics.mean_squared_error(np.log10(rg_npeg_dmso), gg_dmso))\nprint(\"R2 score:\", sklearn.metrics.r2_score(np.log10(rg_npeg_dmso), gg_dmso))",
"Mean Std Error: 2.5943574581609202e-05\nR2 score: 0.9990338036343539\n"
],
[
"# Residuals between the true y data and model y data \nresid_vdmso = np.log10(rg_npeg_dmso) - gg_dmso\nresid_vdmso",
"_____no_output_____"
],
[
"# How to calculate Sum((Xi - avg(X))^2): X values are the bond length values \nnt_ttace = np.log10(n_peg)\nnt_ttace -= np.mean(nt_ttace)\nnhui_ace = nt_ttace**2\nnp.sum(nhui_ace)",
"_____no_output_____"
],
[
"# t-value with 95 % confidence intervals \nscipy.stats.t.ppf(0.975, 4)",
"_____no_output_____"
],
[
"# How to calculate 95% confidence interval for the slope \nflc_vdmso = scipy.stats.t.ppf(0.975, 4)*np.sqrt((np.sum(resid_vdmso**2)/len(resid_vdmso))/(np.sum(nhui_ace)))\nflc_vdmso",
"_____no_output_____"
],
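[
"# Hedged cross-check: scipy.stats.linregress returns the slope standard\n# error directly; scaling it by the t critical value gives a comparable 95%\n# confidence interval (the residual degrees-of-freedom convention may differ\n# slightly from the manual calculation above).\nres = scipy.stats.linregress(np.log10(n_peg), np.log10(rg_npeg_dmso))\nprint(res.slope, scipy.stats.t.ppf(0.975, len(n_peg) - 2) * res.stderr)",
"_____no_output_____"
],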
[
"plt.figure(figsize=(7,7))\nplt.errorbar(np.log10(n_peg), np.log10(rg_npeg_dmso), yerr=rgdmso_npeg_std, color='#A58262', linestyle=\"None\",marker='o',\n capsize=5, capthick=1, ecolor='black')\nplt.plot(np.log10(n_peg), gg_dmso, color='#A58262')\nplt.title(r'Fluory Exponent', fontsize=15)\nplt.xlabel(r'Log($N_{PEG}$)', fontsize=15)\nplt.ylabel(r'Log($R_{g}$)', fontsize=15)\nplt.tick_params(labelsize=14)\nplt.text(1.1, 0.75, r'$v_{DMSO}$ = 0.63 ± 0.02', fontsize=15, color='#A58262')",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a18e4a22bb1dbc7915c910d39e9f626544516d2
| 7,750 |
ipynb
|
Jupyter Notebook
|
exp3_targeted_data_collection.ipynb
|
sundyCoder/STPD
|
67207aac7f23c7ee0b6c674995a86c76cd1f6a4d
|
[
"MIT"
] | null | null | null |
exp3_targeted_data_collection.ipynb
|
sundyCoder/STPD
|
67207aac7f23c7ee0b6c674995a86c76cd1f6a4d
|
[
"MIT"
] | null | null | null |
exp3_targeted_data_collection.ipynb
|
sundyCoder/STPD
|
67207aac7f23c7ee0b6c674995a86c76cd1f6a4d
|
[
"MIT"
] | null | null | null | 29.92278 | 138 | 0.546839 |
[
[
[
"\n# Python Libraries\n%matplotlib inline\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nfrom keras.datasets import cifar10\nfrom keras import backend as K\nimport os,sys\n#import Pillow\n\n# Custom Networks\n#from networks.lenet import LeNet\n#sys.path.append('./')\nfrom networks.pure_cnn import PureCnn\nfrom networks.network_in_network import NetworkInNetwork\nfrom networks.resnet import ResNet\nfrom networks.densenet import DenseNet\nfrom networks.wide_resnet import WideResNet\nfrom networks.capsnet import CapsNet\n\n# Helper functions\nfrom differential_evolution import differential_evolution\nimport helper\nimport scipy.misc\n\n#from scipy.misc import imsave\n\nmatplotlib.style.use('ggplot')\nnp.random.seed(100)\n\ndef load_results():\n with open('networks/results/targeted_results.pkl', 'rb') as file:\n targeted = pickle.load(file)\n return targeted",
"_____no_output_____"
],
[
"(x_train, y_train), (x_test, y_test) = cifar10.load_data()\nclass_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n\nK.tensorflow_backend._get_available_gpus()\n#nin = NetworkInNetwork()\nresnet = ResNet()\n#densenet = DenseNet()\n\nmodels = [resnet]\n",
"_____no_output_____"
],
[
"network_stats, correct_imgs = helper.evaluate_models(models, x_test, y_test)\ncorrect_imgs = pd.DataFrame(correct_imgs, columns=['name', 'img', 'label', 'confidence', 'pred'])\nnetwork_stats = pd.DataFrame(network_stats, columns=['name', 'accuracy', 'param_count'])\n\nnetwork_stats",
"_____no_output_____"
],
[
"targeted = load_results()",
"_____no_output_____"
],
[
"columns = ['model', 'pixels', 'image', 'true', 'predicted', 'success', 'cdiff', 'prior_probs', 'predicted_probs', 'perturbation']\n\ntargeted_results = pd.DataFrame(targeted, columns=columns)",
"_____no_output_____"
],
[
"\nstats = []\nfor model in models:\n print(models)\n val_accuracy = np.array(network_stats[network_stats.name == model.name].accuracy)[0]\n m_result = targeted_results[targeted_results.model == model.name]\n pixels = list(set(m_result.pixels))\n print(model,pixels)\n for pixel in pixels:\n p_result = m_result[m_result.pixels == pixel]\n success_rate = len(p_result[p_result.success]) / len(p_result)\n print(len(p_result[p_result.success]))\n print(len(p_result))\n stats.append([model.name, val_accuracy, pixel, success_rate])\n\n#helper.attack_stats(targeted_results, models, network_stats)",
"_____no_output_____"
],
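[
"# Equivalent one-liner sketch (illustrative): the per-(model, pixels)\n# success rate computed by the explicit loops above, via a pandas groupby.\nprint(targeted_results.groupby(['model', 'pixels'])['success'].mean())",
"_____no_output_____"
],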
[
"model.name",
"_____no_output_____"
],
[
"def visualize_attack(df, class_names):\n _, (x_test, _) = cifar10.load_data()\n\n results = df[df.success]#.sample(9)\n print(results.shape)\n \n z = zip(results.perturbation, x_test[results.image])\n images = np.array([perturb_image(p, img)[0]\n for p,img in z])\n \n labels_true = np.array(results.true)\n labels_pred = np.array(results.predicted)\n #titles = np.array(results.model)",
"_____no_output_____"
],
[
"def perturb_image(xs, img):\n # If this function is passed just one perturbation vector,\n # pack it in a list to keep the computation the same\n if xs.ndim < 2:\n xs = np.array([xs])\n \n # Copy the image n == len(xs) times so that we can \n # create n new perturbed images\n tile = [len(xs)] + [1]*(xs.ndim+1)\n imgs = np.tile(img, tile)\n \n # Make sure to floor the members of xs as int types\n xs = xs.astype(int)\n \n for x,img in zip(xs, imgs):\n # Split x into an array of 5-tuples (perturbation pixels)\n # i.e., [[x,y,r,g,b], ...]\n pixels = np.split(x, len(x) // 5)\n for pixel in pixels:\n # At each pixel's x,y position, assign its rgb value\n x_pos, y_pos, *rgb = pixel\n img[x_pos, y_pos] = rgb\n \n return imgs",
"_____no_output_____"
],
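[
"# Minimal usage sketch (assumption: the CIFAR-10 test set loaded above):\n# apply one 5-tuple perturbation [x, y, r, g, b] to a single image.\ndemo = perturb_image(np.array([16, 16, 255, 0, 0]), x_test[0])\nprint(demo.shape)  # one perturbed copy: (1, 32, 32, 3)",
"_____no_output_____"
],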
[
"# print('Targeted Attack')\n# helper.visualize_attack(targeted_results, class_names)\nmodel_id = model.name\npixel_id = 5\n_, (x_test, _) = cifar10.load_data()\nresults = targeted_results[targeted_results.success]\n#print(results.model)\nresults = results[results.model==model.name]\nresults = results[results.pixels==pixel_id]\nprint(results.shape)\n\nz = zip(results.perturbation, x_test[results.image])\nimages = np.array([perturb_image(p, img)[0] for p,img in z])\nlabels_true = np.array(results.true)\nlabels_pred = np.array(results.predicted)\n\nimage_id = [id for id in results.image]\ncount = 0\nfor i in range(len(labels_true)):\n name = str(count)+'_'+str(image_id[i])+'_'+str(labels_true[i])+'_'+str(labels_pred[i])+'.png'\n image = images[i]\n print(name)\n #import cv2 as cv\n out_path = 'non_'+model_id+'_p'+str(pixel_id)+'/'\n if not os.path.exists(out_path):\n os.system(\"mkdir -p %s\"%(out_path))\n out_name = out_path + name\n #cv.imwrite(out_name, image)\n from PIL import Image\n im = Image.fromarray(image)\n print(out_name)\n im.save(out_name)\n count += 1\n #import imageio\n #imageio.imwrite(out_name,image)\n #imsave(out_name,image)",
"_____no_output_____"
],
[
"results",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a18e6a4e1d21c40203580ea9e0f19997560196b
| 12,807 |
ipynb
|
Jupyter Notebook
|
correctednav_to_pix4dnav.ipynb
|
awi-response/MACS_tools
|
bfd67a9b80f280e33275b86d9b836c7e884f98d9
|
[
"FTL",
"OML"
] | null | null | null |
correctednav_to_pix4dnav.ipynb
|
awi-response/MACS_tools
|
bfd67a9b80f280e33275b86d9b836c7e884f98d9
|
[
"FTL",
"OML"
] | 12 |
2021-04-23T05:53:15.000Z
|
2022-03-28T07:38:28.000Z
|
correctednav_to_pix4dnav.ipynb
|
awi-response/MACS_tools
|
bfd67a9b80f280e33275b86d9b836c7e884f98d9
|
[
"FTL",
"OML"
] | null | null | null | 28.396896 | 131 | 0.396502 |
[
[
[
"### Script to convert geolocation into correct format",
"_____no_output_____"
]
],
[
[
"import geopandas as gpd\nimport pandas as pd\nimport os",
"_____no_output_____"
],
[
"%config Completer.use_jedi = False",
"_____no_output_____"
]
],
[
[
"### Settings",
"_____no_output_____"
]
],
[
[
"INFILE = 'nav.txt'\noutfile = 'geo_pix4d.txt'\nNEW_CRS = 'EPSG:4326'\nH_ACC = 1\nV_ACC = 1",
"_____no_output_____"
]
],
[
[
"#### Load images ",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv(INFILE, sep='\\t')",
"_____no_output_____"
]
],
[
[
"#### Change image suffixes ",
"_____no_output_____"
]
],
[
[
"images = df['File '].str.replace('.macs', '.tif')",
"_____no_output_____"
],
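[
"# Hedged tidy-up (assumption about the nav export): the column names carry\n# trailing spaces ('File ', 'Lon[deg] ', ...). A stripped copy makes lookups\n# less fragile; the original df is kept so the cells below still run as-is.\ndf_clean = df.rename(columns=lambda c: c.strip())\ndf_clean.columns",
"_____no_output_____"
],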
[
"images = images.apply(lambda x: x.strip().split('/')[-1])",
"_____no_output_____"
]
],
[
[
"#### Load coordinates to geom an reproject to selected crs ",
"_____no_output_____"
]
],
[
[
"#gdf = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df['latitude [decimal degrees]'], df['longitude [decimal degrees]']))",
"_____no_output_____"
],
[
"series = gpd.GeoSeries(gpd.points_from_xy(df['Lon[deg] '], df['Lat[deg] ']), crs='EPSG:4326')\nseries_new = series.to_crs(crs=NEW_CRS)",
"_____no_output_____"
]
],
[
[
"#### Fill Table ",
"_____no_output_____"
]
],
[
[
"df['imagename_tif'] = images\ndf['x'] = series_new.geometry.x\ndf['y'] = series_new.geometry.y\ndf['horizontal_accuracy'] = H_ACC\ndf['vertical_accuracy'] = V_ACC",
"_____no_output_____"
]
],
[
[
"#### Create final structure",
"_____no_output_____"
]
],
[
[
"df.columns",
"_____no_output_____"
],
[
"df_new = df[['imagename_tif', 'y', 'x', 'Alt[m] ', 'Omega[deg] ',\n 'Phi[deg] ', 'Kappa[deg]', 'horizontal_accuracy', 'vertical_accuracy']]",
"_____no_output_____"
],
[
"df_new",
"_____no_output_____"
]
],
[
[
"#### Export file ",
"_____no_output_____"
]
],
[
[
"df_new.to_csv(outfile, sep='\\t', header=True, index=False)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a18e75a3bc34e7af3d28a08e869ecdf4146722e
| 721,831 |
ipynb
|
Jupyter Notebook
|
02_image_level.ipynb
|
gykovacs/retina_vessel_segmentation
|
f544a0a90f543a36ff87459abca3afca710db7b9
|
[
"MIT"
] | null | null | null |
02_image_level.ipynb
|
gykovacs/retina_vessel_segmentation
|
f544a0a90f543a36ff87459abca3afca710db7b9
|
[
"MIT"
] | null | null | null |
02_image_level.ipynb
|
gykovacs/retina_vessel_segmentation
|
f544a0a90f543a36ff87459abca3afca710db7b9
|
[
"MIT"
] | null | null | null | 232.773621 | 59,622 | 0.444998 |
[
[
[
"# Image level consistency check",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport os\nimport os.path\n\nimport matplotlib.pyplot as plt\nimport plotly.express as px\n\nfrom core import *\nfrom config import image_stats_file, xls_file, figures_dir, latex_dir, image_level_results_file, image_level_threshold\n\npd.set_option('display.max_rows', None)\npd.set_option('display.max_colwidth', 100)\npd.set_option('display.width', 10000)",
"_____no_output_____"
],
[
"# reading image statistics\ndata= pd.read_csv(image_stats_file)\n\n# reading the summary page\nmethods= pd.read_excel(xls_file, engine='openpyxl')\n#methods= methods.iloc[:methods[methods['key'].isnull()].index[0]]\nmethods= methods[methods['flag'] == 'primary']\nmethods.index= methods['key']\n\n# reading the image level figures\nxl= pd.ExcelFile(xls_file, engine='openpyxl')\nimage_level= {}\nfor s in xl.sheet_names[1:]:\n image_level[s]= xl.parse(s)\n\nprint('image level figures available for: %s' % str(list(image_level.keys())))",
"image level figures available for: ['mo2017', 'meng2015', 'hassan2018', 'tang2017', 'zhu2016', 'geetharamani2016', 'wang2015', 'singh2016', 'singh2017', 'saroj2020', 'dash2018', 'fathi2013', 'imani2015', 'emary2014', 'waheed2015', 'rahebi2014', 'thangaraj2017', 'adapa2020', 'escorcia-gutierrez2020', 'li2016', 'khan2016', 'fraz2012b', 'fraz2012', 'lupascu2010', 'marin2011', 'ricci2007', 'barkana2017', 'tamim2020', 'frucci2016', 'dash2020', 'moghimirad2012', 'odstrcilik2013', 'biswal2017', 'bharkad2017', 'lupascu2016', 'kumar2020', 'narkthewan2019']\n"
],
[
"methods.columns",
"_____no_output_____"
],
[
"methods.index",
"_____no_output_____"
],
[
"# test images with annotations #1 as ground truth\ndata_test= data[(data['test'] == True) & (data['annotator'] == 1)].reset_index()\n\n# test images with annotations #2 as ground truth\ndata_test_obs2= data[(data['test'] == True) & (data['annotator'] == 2)].reset_index()",
"_____no_output_____"
],
[
"# extracting figures with and without FoV\ndata_test_with_fov= data_test[data_test['fov'] == True].reset_index(drop=True)\ndata_test_without_fov= data_test[data_test['fov'] == False].reset_index(drop=True)\n\ndata_test_with_fov_obs2= data_test_obs2[data_test_obs2['fov'] == True].reset_index(drop=True)\ndata_test_without_fov_obs2= data_test_obs2[data_test_obs2['fov'] == False].reset_index(drop=True)",
"_____no_output_____"
],
[
"data_test_with_fov",
"_____no_output_____"
]
],
[
[
"## Calculating the scores for all image level figures",
"_____no_output_____"
]
],
[
[
"# checking the consistencies at the image level\n\nfor s in image_level:\n if not s in methods.index:\n continue\n\n print('processing', s)\n\n for i, row in image_level[s].iterrows():\n image_id= row['image']\n\n p_with_fov= data_test_with_fov[data_test_with_fov['id'] == image_id]['p'].values[0]\n n_with_fov= data_test_with_fov[data_test_with_fov['id'] == image_id]['n'].values[0]\n p_without_fov= data_test_without_fov[data_test_without_fov['id'] == image_id]['p'].values[0]\n n_without_fov= data_test_without_fov[data_test_without_fov['id'] == image_id]['n'].values[0]\n\n p_with_fov_obs2= data_test_with_fov[data_test_with_fov_obs2['id'] == image_id]['p'].values[0]\n n_with_fov_obs2= data_test_with_fov[data_test_with_fov_obs2['id'] == image_id]['n'].values[0]\n p_without_fov_obs2= data_test_without_fov[data_test_without_fov_obs2['id'] == image_id]['p'].values[0]\n n_without_fov_obs2= data_test_without_fov[data_test_without_fov_obs2['id'] == image_id]['n'].values[0]\n\n digits= methods.loc[s]['digits']\n if digits > 2:\n eps= 10.0**(-digits)\n else:\n eps= 10.0**(-digits)/2\n\n image_level[s].loc[i, 'n_with_fov']= n_with_fov\n image_level[s].loc[i, 'n_without_fov']= n_without_fov\n image_level[s].loc[i, 'n_with_fov_obs2']= n_with_fov_obs2\n image_level[s].loc[i, 'n_without_fov_obs2']= n_without_fov_obs2\n image_level[s].loc[i, 'p_with_fov']= p_with_fov\n image_level[s].loc[i, 'p_without_fov']= p_without_fov\n image_level[s].loc[i, 'p_with_fov_obs2']= p_with_fov_obs2\n image_level[s].loc[i, 'p_without_fov_obs2']= p_without_fov_obs2\n\n image_level[s].loc[i, 'consistency_with_fov']= consistency_image_level(p_with_fov, n_with_fov, row['acc'], row['sens'], row['spec'], eps)\n image_level[s].loc[i, 'consistency_without_fov']= consistency_image_level(p_without_fov, n_without_fov, row['acc'], row['sens'], row['spec'], eps)\n\n image_level[s].loc[i, 'consistency_with_fov_obs2']= consistency_image_level(p_with_fov_obs2, n_with_fov_obs2, row['acc'], row['sens'], row['spec'], eps)\n image_level[s].loc[i, 'consistency_without_fov_obs2']= consistency_image_level(p_without_fov_obs2, n_without_fov_obs2, row['acc'], row['sens'], row['spec'], eps)",
"processing mo2017\nprocessing meng2015\nprocessing hassan2018\nprocessing tang2017\nprocessing zhu2016\nprocessing geetharamani2016\nprocessing wang2015\nprocessing singh2016\nprocessing singh2017\nprocessing saroj2020\nprocessing dash2018\nprocessing fathi2013\nprocessing imani2015\nprocessing emary2014\nprocessing waheed2015\nprocessing rahebi2014\nprocessing thangaraj2017\nprocessing adapa2020\nprocessing escorcia-gutierrez2020\nprocessing li2016\nprocessing khan2016\nprocessing fraz2012b\nprocessing fraz2012\nprocessing lupascu2010\nprocessing marin2011\nprocessing ricci2007\nprocessing barkana2017\nprocessing tamim2020\nprocessing frucci2016\nprocessing dash2020\nprocessing moghimirad2012\nprocessing odstrcilik2013\nprocessing bharkad2017\nprocessing lupascu2016\nprocessing kumar2020\nprocessing narkthewan2019\n"
],
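[
"# Hedged sketch (the real implementation lives in the project's 'core' module and\n# may differ): one plausible reading of consistency_image_level -- test whether\n# integer tp/tn counts exist that reproduce the reported acc/sens/spec within the\n# rounding tolerance eps implied by the number of published decimal places.\ndef consistency_image_level_sketch(p, n, acc, sens, spec, eps):\n    import numpy as np\n    tp = np.arange(0, p + 1)\n    tn = np.arange(0, n + 1)\n    tp = tp[np.abs(tp / p - sens) <= eps]  # tp values consistent with sensitivity\n    tn = tn[np.abs(tn / n - spec) <= eps]  # tn values consistent with specificity\n    for t in tp:  # does any surviving (tp, tn) pair also reproduce the accuracy?\n        if np.any(np.abs((t + tn) / (p + n) - acc) <= eps):\n            return True\n    return False",
"_____no_output_____"
],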
[
"# calculating the percentages of images with a given number of negatives falling in the calculated range\nfor key in image_level:\n if not key in methods.index:\n continue\n methods.loc[key, 'image_level_consistency_with_fov']= np.sum(image_level[key]['consistency_with_fov']*1)/len(image_level[key])\n methods.loc[key, 'image_level_consistency_without_fov']= np.sum(image_level[key]['consistency_without_fov']*1)/len(image_level[key])\n methods.loc[key, 'n_image_level']= len(image_level[key])",
"_____no_output_____"
]
],
[
[
"## Printing the results of all image level figures\n",
"_____no_output_____"
]
],
[
[
"image_level['mo2017']",
"_____no_output_____"
],
[
"image_level['meng2015']",
"_____no_output_____"
],
[
"image_level['hassan2018']",
"_____no_output_____"
],
[
"image_level['tang2017']",
"_____no_output_____"
],
[
"image_level['zhu2016']",
"_____no_output_____"
],
[
"image_level['geetharamani2016']",
"_____no_output_____"
],
[
"image_level['wang2015']",
"_____no_output_____"
],
[
"image_level['singh2016']",
"_____no_output_____"
],
[
"image_level['singh2017']",
"_____no_output_____"
],
[
"image_level['saroj2020']",
"_____no_output_____"
],
[
"image_level['dash2018']",
"_____no_output_____"
],
[
"image_level['fathi2013']",
"_____no_output_____"
],
[
"image_level['imani2015']",
"_____no_output_____"
],
[
"image_level['emary2014']",
"_____no_output_____"
],
[
"image_level['waheed2015']",
"_____no_output_____"
],
[
"image_level['rahebi2014']",
"_____no_output_____"
],
[
"image_level['thangaraj2017']",
"_____no_output_____"
],
[
"image_level['adapa2020']",
"_____no_output_____"
],
[
"image_level['escorcia-gutierrez2020']",
"_____no_output_____"
],
[
"image_level['khan2016']",
"_____no_output_____"
],
[
"image_level['fraz2012b']",
"_____no_output_____"
],
[
"image_level['fraz2012']",
"_____no_output_____"
],
[
"image_level['lupascu2010']",
"_____no_output_____"
],
[
"image_level['marin2011']",
"_____no_output_____"
],
[
"image_level['ricci2007']",
"_____no_output_____"
],
[
"image_level['li2016']",
"_____no_output_____"
],
[
"image_level['barkana2017']",
"_____no_output_____"
],
[
"image_level['tamim2020']",
"_____no_output_____"
],
[
"image_level['frucci2016']",
"_____no_output_____"
],
[
"image_level['moghimirad2012']",
"_____no_output_____"
],
[
"image_level['odstrcilik2013']",
"_____no_output_____"
],
[
"image_level['dash2020']",
"_____no_output_____"
],
[
"image_level['bharkad2017']",
"_____no_output_____"
],
[
"image_level['lupascu2016']",
"_____no_output_____"
],
[
"image_level['kumar2020']",
"_____no_output_____"
],
[
"image_level['narkthewan2019']",
"_____no_output_____"
]
],
[
[
"## Categorization",
"_____no_output_____"
]
],
[
[
"threshold= image_level_threshold\n\nreduced= methods[methods['image_level_consistency_with_fov'].notnull()].reset_index(drop=True)\n\nreduced.loc[reduced['image_level_consistency_with_fov'] > threshold, 'category']= 'FoV'\nreduced.loc[reduced['image_level_consistency_without_fov'] > threshold, 'category']= 'no FoV'\nreduced.loc[(reduced['image_level_consistency_with_fov'] > threshold) & (reduced['image_level_consistency_without_fov'] > threshold), 'category']= 'ambiguous'\nreduced.loc[(~reduced['category'].isin(['FoV',\n 'no FoV',\n 'ambiguous'])), 'category']= 'outlier'",
"_____no_output_____"
]
],
[
[
"## Analysis",
"_____no_output_____"
]
],
[
[
"reduced[['key', 'category']].groupby('category').count()",
"_____no_output_____"
],
[
"reduced[reduced['category'] == 'ambiguous']",
"_____no_output_____"
],
[
"# preparing latex table\ndef prepare_key(x):\n name= x[:-4]\n year= x[-4:]\n name= name[:1].upper() + name[1:]\n return name + ' (' + year + ') \\cite{' + x + '}'\n\nlatex= reduced[['key', 'acc', 'sens', 'spec', 'digits', 'n_image_level', 'image_level_consistency_with_fov', 'image_level_consistency_without_fov', 'category']]\nlatex.loc[latex['category'] == 'no FoV', 'category']= 'all pixels'\nlatex['key']= latex['key'].apply(lambda x: x[0:1].upper() + x[1:])\nlatex['key']= latex['key'].apply(lambda x: ' \\cite{' + x.lower() + '}')\n#latex['key']= latex['key'].apply(prepare_key)\nlatex['n_image_level']= latex['n_image_level'].astype(int)\nlatex['digits']= latex['digits'].astype(int)\nlatex['acc']= latex['acc'].apply(lambda x: ('%.4f' % x)[1:])\nlatex['sens']= latex['sens'].apply(lambda x: ('%.4f' % x)[1:])\nlatex['spec']= latex['spec'].apply(lambda x: ('%.4f' % x)[1:])\nlatex['image_level_consistency_with_fov']= (latex['image_level_consistency_with_fov']*100).astype(int)\nlatex['image_level_consistency_without_fov']= (latex['image_level_consistency_without_fov']*100).astype(int)\nlatex.columns=['Key', '$\\overline{acc}$', '$\\overline{sens}$', '$\\overline{spec}$', '\\rotatebox{90}{Decimal places}', '\\rotatebox{90}{Num. image level fig.}', '\\rotatebox{90}{$H_{\\text{FoV}}$ not rejected (\\%)}', '\\rotatebox{90}{$H_{\\text{all}}$ not rejected (\\%)}', 'Decision']\nlatex",
"/home/gykovacs/anaconda3/envs/retina_vessel_segmentation/lib/python3.7/site-packages/pandas/core/indexing.py:1763: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n isetter(loc, value)\n/home/gykovacs/anaconda3/envs/retina_vessel_segmentation/lib/python3.7/site-packages/ipykernel_launcher.py:10: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n # Remove the CWD from sys.path while we load stuff.\n/home/gykovacs/anaconda3/envs/retina_vessel_segmentation/lib/python3.7/site-packages/ipykernel_launcher.py:11: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n # This is added back by InteractiveShellApp.init_path()\n/home/gykovacs/anaconda3/envs/retina_vessel_segmentation/lib/python3.7/site-packages/ipykernel_launcher.py:13: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n del sys.path[0]\n/home/gykovacs/anaconda3/envs/retina_vessel_segmentation/lib/python3.7/site-packages/ipykernel_launcher.py:14: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n \n/home/gykovacs/anaconda3/envs/retina_vessel_segmentation/lib/python3.7/site-packages/ipykernel_launcher.py:15: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n from ipykernel import kernelapp as app\n/home/gykovacs/anaconda3/envs/retina_vessel_segmentation/lib/python3.7/site-packages/ipykernel_launcher.py:16: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n app.launch_new_instance()\n/home/gykovacs/anaconda3/envs/retina_vessel_segmentation/lib/python3.7/site-packages/ipykernel_launcher.py:17: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: 
https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n/home/gykovacs/anaconda3/envs/retina_vessel_segmentation/lib/python3.7/site-packages/ipykernel_launcher.py:18: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n/home/gykovacs/anaconda3/envs/retina_vessel_segmentation/lib/python3.7/site-packages/ipykernel_launcher.py:19: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n"
],
[
"latex_str= set_column_spaces(latex.sort_values('$\\overline{acc}$', ascending=False).to_latex(escape=False, index=False), n_cols=9)\nwith open(os.path.join(latex_dir, \"tab2.tex\"), \"w\") as text_file:\n text_file.write(latex_str)",
"_____no_output_____"
],
[
"px.scatter(reduced[reduced['category'].notnull()], x='acc', y='spec', text='key', color='category', width=1000, height=1000)",
"_____no_output_____"
],
[
"markers= ['o', 's', '+', 'x']\n\nlabel_mapping= {'FoV': 'FoV', 'outlier': 'Outlier', 'no FoV': 'All pixels'}\n\nplt.figure(figsize=(5, 4))\nfor i, c in enumerate(['FoV', 'no FoV', 'outlier']):\n plt.scatter(reduced[reduced['category'] == c]['acc'], reduced[reduced['category'] == c]['spec'], label=label_mapping[c], marker=markers[i], s=100)\nplt.scatter([0.9473], [0.9725], label = 'Ann. #2 with FoV', marker='D', s=200)\nplt.scatter([0.9636], [0.9818], label = 'Ann. #2 with all pixels', marker='*', s=300)\nplt.xlabel('Accuracy')\nplt.ylabel('Specificity')\n#plt.gca().set_aspect(1.0)\nplt.tight_layout()\nplt.legend()\nplt.savefig(os.path.join(figures_dir, 'image_level.pdf'))\nplt.show()",
"_____no_output_____"
],
[
"methods= pd.merge(methods.reset_index(drop=True), reduced[['key', 'category']], on='key', how='left')",
"_____no_output_____"
]
],
[
[
"## Writing the results to file",
"_____no_output_____"
]
],
[
[
"methods.to_csv(image_level_results_file, index=False)",
"_____no_output_____"
],
[
"methods.columns",
"_____no_output_____"
],
[
"methods",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a18eb8c9744a638ada14898b94929786d4b6dd8
| 226,313 |
ipynb
|
Jupyter Notebook
|
2-Data Cleaning.ipynb
|
tushar176/Data-Science-Salary-Analysis
|
d4ccc48d04840364dd32845bd9eebcb2a404cd40
|
[
"MIT"
] | 1 |
2020-07-05T10:47:26.000Z
|
2020-07-05T10:47:26.000Z
|
2-Data Cleaning.ipynb
|
tushar176/Data-Science-Salary-Analysis
|
d4ccc48d04840364dd32845bd9eebcb2a404cd40
|
[
"MIT"
] | null | null | null |
2-Data Cleaning.ipynb
|
tushar176/Data-Science-Salary-Analysis
|
d4ccc48d04840364dd32845bd9eebcb2a404cd40
|
[
"MIT"
] | null | null | null | 33.742806 | 268 | 0.340113 |
[
[
[
"# Data Cleaning And Feature Engineering",
"_____no_output_____"
],
[
"* Data is very dirty so we have to clean our data for analysis.\n* Also have many missing values represented by -1(have to fix it is very important).",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"data=pd.read_csv('original_data.csv')\ndata.head()",
"_____no_output_____"
],
[
"data.shape",
"_____no_output_____"
],
[
"#droping duplicates\ndata=data.drop_duplicates(data.columns)",
"_____no_output_____"
],
[
"data.shape",
"_____no_output_____"
]
],
[
[
"# Salary column",
"_____no_output_____"
]
],
[
[
"#droping salary which have -1 i.e no salary provided\ndata=data[data['Salary Estimate'] != '-1']",
"_____no_output_____"
],
[
"data.shape",
"_____no_output_____"
],
[
"data.head(20)",
"_____no_output_____"
],
[
"#replacing ₹ and k to 000\ndata['Salary Estimate']=data['Salary Estimate'].apply(lambda x: x.replace('₹','').replace('K','000').replace(',',''))",
"_____no_output_____"
],
[
"data.head()",
"_____no_output_____"
],
[
"data.dtypes",
"_____no_output_____"
],
[
"data['Salary Estimate'][0:50]",
"_____no_output_____"
],
[
"#making another column with 0 1 \n#1 if salary is by hourly else 0\ndata['hourly'] = data['Salary Estimate'].apply(lambda x: 1 if '/hr' in x.lower() else 0)",
"_____no_output_____"
],
[
"#making another column with 0 1 \n#1 if salary is by monthly else 0\ndata['monthly'] = data['Salary Estimate'].apply(lambda x: 1 if '/mo' in x.lower() else 0)",
"_____no_output_____"
],
[
"#removing /hr and /mo\ndata['Salary Estimate']=data['Salary Estimate'].apply(lambda x: x.lower().replace('/hr','').replace('/mo',''))",
"_____no_output_____"
],
[
"#if needed in the future\ndata['min_salary'] = data['Salary Estimate'].apply(lambda x: (x.split('-')[0]))\n",
"_____no_output_____"
],
[
"#check point\ndata.to_csv('clean.csv',index=False)",
"_____no_output_____"
],
[
"df=pd.read_csv('clean.csv')",
"_____no_output_____"
],
[
"def avg_salary(x):\n lst=x.split('-')\n l=len(lst)\n if l>1:\n return (float(lst[1])+float(lst[0]))/2\n else:\n return float(lst[0])\n ",
"_____no_output_____"
],
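[
"# Hedged quick check (not in the original notebook): the helper should average a\n# salary range and fall back to the single value when there is no range.\nprint(avg_salary('40000-60000'))  # expected 50000.0\nprint(avg_salary('45000'))        # expected 45000.0",
"_____no_output_____"
],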
[
"df['avg_salary'] = df['Salary Estimate'].apply(avg_salary)",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"df.shape",
"_____no_output_____"
],
[
"#hourly salary to annual\ndf['avg_salary'] = df.apply(lambda x: x.avg_salary*2000 if x.hourly ==1 else x.avg_salary, axis =1)",
"_____no_output_____"
],
[
"#monthly salry to annual\ndf['avg_salary'] = df.apply(lambda x: x.avg_salary*12 if x.monthly ==1 else x.avg_salary, axis =1)",
"_____no_output_____"
]
],
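[
[
"# Hedged sanity check (not in the original notebook): after the conversions, rows\n# flagged hourly or monthly should now carry annualised averages comparable to the rest.\ndf.groupby(['hourly', 'monthly'])['avg_salary'].describe()",
"_____no_output_____"
]
],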
[
[
"# Company Name Column",
"_____no_output_____"
]
],
[
[
"#cleaning company name\ndf['Company Name']=df['Company Name'].apply(lambda x: x.split('\\n')[0])",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
]
],
[
[
"# Founded column",
"_____no_output_____"
]
],
[
[
"data[data['Founded']==-1]",
"_____no_output_____"
],
[
"#adding new column company_age\n#age of company \ndf['company_age'] = df.Founded.apply(lambda x: x if x <1 else 2020 - x)",
"_____no_output_____"
]
],
[
[
"# job description Column",
"_____no_output_____"
]
],
[
[
"import numpy as np\ndef clean_des(x):\n try: \n return x.replace('\\n', ' ')\n \n except AttributeError:\n return np.NaN",
"_____no_output_____"
],
[
"#cleaning job description\n#job description have an values\ndf['Job Description']=df['Job Description'].apply(clean_des)",
"_____no_output_____"
],
[
"df.tail()",
"_____no_output_____"
]
],
[
[
"# Job Title Column",
"_____no_output_____"
]
],
[
[
"df['Job Title'].value_counts()",
"_____no_output_____"
],
[
"def title_simplifier(title):\n if 'data scientist' in title.lower() or 'data science' in title.lower():\n return 'data scientist'\n elif 'data engineer' in title.lower():\n return 'data engineer'\n elif 'analyst' in title.lower():\n return 'analyst'\n elif 'machine learning' in title.lower():\n return 'machine learning engineer'\n elif 'manager' in title.lower():\n return 'manager'\n elif 'director' in title.lower():\n return 'director'\n else:\n return 'other'",
"_____no_output_____"
],
[
"#simplifing titles to simplify thw work as there are 282 unique values which have the mostly same work\ndf['job_title_simplified'] = df['Job Title'].apply(title_simplifier)",
"_____no_output_____"
],
[
"df['job_title_simplified'].value_counts()",
"_____no_output_____"
],
[
"#if required for analysis\ndf['number_competitors'] = df['Competitors'].apply(lambda x: len(x.split(',')) if x != '-1' else 'not provided')\n",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
]
],
[
[
"# Revenue Column\n* exploring revenue column as it can be a important feature in analysis",
"_____no_output_____"
]
],
[
[
"# replace -1 values with NaN (missing value)\ndf = df.replace(to_replace = -1, value = np.nan)",
"_____no_output_____"
],
[
"#null value in revenue\n#df[df['Revenue']=='Unknown / Non-Applicable']",
"_____no_output_____"
],
[
"#making another column same as Revenue so that we can make changes to this new column that will not effect origial Revenue column.\ndf['revenue']=df['Revenue']",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"df['revenue']=df['revenue'].apply(lambda x: x.replace('Unknown / Non-Applicable','-1'))",
"_____no_output_____"
]
],
[
[
"### cleaning revenue column.",
"_____no_output_____"
]
],
[
[
"#replaceing all the characters that are not numbers\ndf['revenue']=df['revenue'].apply(lambda x: x.replace('₹','').replace('+','').replace('INR','').replace('()','').replace('billion',''))",
"_____no_output_____"
],
[
"#making another column with 0 1 \n#1 if revenue is in million else 0\ndf['Revenue_million'] = df['revenue'].apply(lambda x: 1 if 'million' in x.lower() else 0)",
"_____no_output_____"
],
[
"#replaceing million\ndf['revenue']=df['revenue'].apply(lambda x: x.replace('million',''))\ndf['revenue']=df['revenue'].apply(lambda x: x.replace('to','-'))",
"_____no_output_____"
]
],
[
[
"### Making another column for avg of revenue as original revenue have values in form of ranges but we want a specific value for analysis.",
"_____no_output_____"
]
],
[
[
"#there are -1 so when split on - it raise an error that is why use try block \ndef avg_revenue(x):\n lst=x.split('-')\n l=len(lst)\n if l>1:\n try:\n return (float(lst[1])+float(lst[0]))/2\n except:\n return np.nan\n else:\n return float(lst[0])\n ",
"_____no_output_____"
],
[
"df['avg_revenue'] = df['revenue'].apply(avg_revenue)",
"_____no_output_____"
],
[
"#### making unit of average revenue as uniform\ndf['avg_revenue'] = df.apply(lambda x: x.avg_revenue/1000 if x.Revenue_million ==1 else x.avg_revenue, axis =1)",
"_____no_output_____"
],
[
"#check percentage of NaN data in every column\nround((100*df.isnull().sum())/len(df.index),2)",
"_____no_output_____"
]
],
[
[
"#### Avg_Revenue have about 47% of missing values.It is said that column that have missing value % greater than 30 will be droped but Revenue can be a important column for analysis so we will fill missing values bt using advanced techniques like KNN-Imputer.\n#### AS we will fill there values there will be possiblity that analysis around revenue may be wrong we will see it what is the effect of revenue on salary.",
"_____no_output_____"
]
],
[
[
"#import required libraries from advanced imputation techniques\nfrom sklearn.impute import KNNImputer",
"_____no_output_____"
],
[
"pd.set_option('display.max_rows',None)",
"_____no_output_____"
],
[
"X=df.drop(['Company Name', 'Competitors', 'Headquarters', 'Industry',\n 'Job Description', 'Job Title', 'Location','Founded','revenue',\n 'Salary Estimate', 'Sector', 'Size', 'Type of ownership', 'hourly',\n 'monthly', 'min_salary','Revenue','company_age','Rating','avg_salary',\n 'job_title_simplified', 'number_competitors', 'Revenue_million'],axis=1)\nX",
"_____no_output_____"
],
[
"imputer = KNNImputer(n_neighbors=3)\ndf['avg_revenue']=imputer.fit_transform(X)",
"_____no_output_____"
],
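[
"# Hedged toy example (illustrative only, not part of the cleaning pipeline): the KNN\n# imputer replaces each NaN with the mean of that feature over the k nearest rows,\n# with distances computed on the non-missing features.\nimport numpy as np\ntoy = np.array([[1.0, 2.0], [2.0, np.nan], [3.0, 6.0], [8.0, 8.0]])\n# row 1's NaN becomes the mean of its 2 nearest rows' second feature: (2+6)/2 = 4\nprint(KNNImputer(n_neighbors=2).fit_transform(toy))",
"_____no_output_____"
],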
[
"df['avg_revenue']=round(df['avg_revenue'])",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"df.columns",
"_____no_output_____"
],
[
"df2=df.drop(columns=[ 'hourly', 'monthly', 'min_salary','number_competitors', 'revenue','Revenue_million'])",
"_____no_output_____"
],
[
"df2.head()",
"_____no_output_____"
],
[
"df2.to_csv('final_cleaned_data.csv',index=False)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a18ecfbaf311e6999034137ab88289549fc629c
| 892,883 |
ipynb
|
Jupyter Notebook
|
Notebook/Technical Notebook.ipynb
|
Debanjan-C/Adult_Income
|
2ae2ea7485ff4ab63681a505e69169c022cd8690
|
[
"Apache-2.0"
] | null | null | null |
Notebook/Technical Notebook.ipynb
|
Debanjan-C/Adult_Income
|
2ae2ea7485ff4ab63681a505e69169c022cd8690
|
[
"Apache-2.0"
] | null | null | null |
Notebook/Technical Notebook.ipynb
|
Debanjan-C/Adult_Income
|
2ae2ea7485ff4ab63681a505e69169c022cd8690
|
[
"Apache-2.0"
] | 1 |
2020-12-08T17:45:26.000Z
|
2020-12-08T17:45:26.000Z
| 207.937354 | 369,840 | 0.890207 |
[
[
[
"# Adult Census Income",
"_____no_output_____"
],
[
"Debanjan Chowdhury Data 602",
"_____no_output_____"
],
[
"# Frame the problem and look at the big picture\n",
"_____no_output_____"
],
[
"## Abstract and Summary\n\nAccording to an article in the US News - A World report, they were evaluating how indidividuals did not fill out paper works for a long time and the numbers may have been misled. In the year of 2018, an inspecotr general in Rhode Island released a report where the census goers would go to the homes of individuals who had not filled out their forms in a long while. (US News) Another intersting information comes from Federal News Network. According to an article in Federal News Network, the legislatures in the house were planning to apss a bill wher eyou would have about $11,000 in fines in you lie or spread any form of misinformation in the census. This is scary and I also wanted to do this project, because this question came to my mind about why one would try lying on a census and was wondering how they would figure out. Therefore, this dataset came to my mind as it was about census and it would help me get an idea of how to cross check when someone may be lying. (Federal News Network). These two are the main sources of motivation behind why I wanted to work on this project. After seeing this information, I noticed this dataset and it contained information about individuals on a census, there data and the salary ranges of whether it is greater than 50K or less than or equal to 50K salary. The government has recently mentioned that they will provide benefits to individuals who may ahve a salary less than 50K. My colleagues have provided me with a dataset with 32561 rows or that many individuals and 15 columns initailly. The data contains information about an individuals age, education, occupation, race, gender, capital gain, capital loss, hours per week that they work and income (target). The targer data tells us if the salry is in the specific ranges as mentioned above. My role in the company as a data scientist is to verify whether the the salary information is correct and if it is accurate or if someone is bluffing on a specific income for benefits. Initially, I conducted data cleaning to check for missing values and marked them as unknowwn as some individuals may have been unemployed in terms of their job or profession and they may not have a salary. Therefore, I added that step. The next step was where I removed a column as it was redundant and the categorical counting of education categories started from one and not 0 which would have affected us during modelling. Further, I conducted Exploratory Data Analysis to evaluate the dataset further. Next, I conducted feature engineering to convert all letters and other non-numeric categorical values into numeric values as models would use only numeric values. I used label encoding for that. Following that step, I would scale the datasets to ensure that they all are within the same range. After those steps, I developed a logistic regression model, decision tree and random forest models where I split the data into testing and training sets and tested the x values with the targets. In order to verify the accuracy of the models, I used cross validation to check for the mean and standard devation to verify the model accuracy. The cross validation would show the mean and standard devation and that gives an idea of whether the accuracy amount may be correct or not. Following that, I used a confusion matrix and found the accruacy, precision and recall scores for each of the models. I compared the model and the Random forest came to be the best. 
In order to finetune the model, I removed some of the main outliers from specific columns and was able to improve scores once again. The removal of outliers also showed the random forest as highest in scores. One common thing all models showed was that the true negatives were highest, but in all cases the true positive was larger than the true negative when we went to removing outliers in the confusion matrix.",
"_____no_output_____"
],
[
"## Business Problem \n\nBusiness Problem: According to an article in the US News - A World report, they are evaluating how indidividuals have not filled out paper works for a long time and the numbers may have been misled. Somewhere aorund 2018, an inspecotr general in Rhode Island released a report where the census goers would go to the homes of individuals who had not filled out their forms in a long while. (US News) This was one of my main motivations on why I wanted to look into the project. One misinformation could cause a lot of trouble in the modellings and leagal trouble also. According to an article in Federal News Network, the legislatures in the house were planning to apss a bill wher eyou would have about $11,000 in fines in you lie or spread any form of misinformation in the census. This is scary and I also wanted to do this project, because this question came to my mind about why one would try lying on a census and was wondering how they would figure out. Therefore, this dataset came to my mind as it was about census and it would help me get an idea of how to cross check when someone may be lying. (Federal Newws Network).\n\nSource: \n- https://www.usnews.com/news/us/articles/2020-02-03/report-census-hasnt-tested-tasks-to-catch-people-who-lie\n- https://federalnewsnetwork.com/federal-newscast/2020/03/spreading-false-information-about-2020-census-could-land-you-in-jail-if-new-bill-becomes-law/\n\nFor this project, I am a Data Scientist in the US Census bureau. Recently, the government has announced a special package or benefit for all individuals who are earning less than 50K in temrs of salary. My colleagues collected the data of different individuals who are over the age of 16 and my job is to double check whether their claims on salary are accurate based on all other types of data that they are providing or if they are bluffing in order to get the benefits. There are many details, but the main ones collected from individuals are their age, education, occupation, race, gender, capital gain, capital loss, hours per week that they work and income (Which was used) as a target. Our dataset has about 32561 rows or that many individuals and 15 columns initailly. I removed one of them as it showed categorical labels for individuals and there education, but it did not start from 0 and started from one which would cause trouble when modelling. The target value has details about whether an individuals salary was greater than or less than/equal to 50K. My goal was to verify the accuracy about whether the salary range of aspecific individual was correct or seemed fishy. The dataset will give us an understading of capital gains or loss an individual had in terms of salary and I also noticed specific races where in higher propertion than the others, so many outliers came into existence. I used different types of methods and technologies like data cleaning, feature engineering and exploratory data analysis to undersand the data in more details and evaluate where to be alert or pay attention. Following that, I use logistic regression, decision trees and random forest methods to verify if the income category mentione was correct or not. I compared all three models and realized that one is relatively better than the other one. In order to help enhance and improve the model in the fine tuning step, I decided to remove major outliers in the data columns or features that may have been playing a role with pulling the overall score down. 
After removing the outliers, I realized that the the accuracy, precision and the recall scores rose by a specfic amount. I also noticed that the random forest model showed the best scores in terms of the accuracy.\n\n## ML Problem\n\nThe corresponding machine learning problem is to use Logistic Regression, Decision tree and Random Forest model to evaluate which model shows the best accuracy results and will help us determine the accurac y behind whether all the feature match the target dataset. Our main goal here is to develop a model that is higher than an accuracy score of 75% as 75% was what out score was when we divided the number of individuals who earned more than 50K salary with the total number of indidivuals.",
"_____no_output_____"
],
[
"## Getting the data\n### Dataset\n\nThe dataset used for this project was found at \nhttps://www.kaggle.com/uciml/adult-census-income.\nThe dataset was inspired by the intial data in the UCI website: \nhttp://archive.ics.uci.edu/ml/datasets/Adult\nThis dataset contains information that was extracted from a census in a past year. The individuals are above age 16 and the dataset contains information about individuals, their work type (private company employee, government employee, etc), their education levels, marital status, occupation (like technical field, farmer, etc), their race and many more information. The dataset then shows us the income column as the target column and that shows us if an individuals salary is above or below $50K. \n\n### Data Dictionary\nOur dataset has about 32561 rows or that many individuals and 15 columns initailly. I removed one of them as it showed categorical labels for individuals and there education, but it did not start from 0 and started from one which would cause trouble when modelling. So 14 columns\n\n- age: (int data type), numerical data. It contains each of the individuals - -- workclass: (string/object data type), cateogorical data. It contains the details of what type of work type each individual has liek private job, government job, etc. \n- fnlwgt: (int data type), numerical data. This is a final weight ranking section that is determined by the amount of folks who are over 16, Hispanic and they evaluate the weight by the race, age and sex. \n- education: (string/object data type), cateogorical data. It contains the education completed levels of each of the individuals int he census. Some have completed up to high school, some have gone to college, some are still in school.\n- education-num: (int data type), categorical data. This column contains a value reprsenting the education level of each individual. However, we dropped this column as it is redundant and the categorical count does not start from 0 and starts from 1. It could affect the model unless every categorical value starts from 0 when it is converted to numeric value.\n- marital-status: (int data type), categorical data. It contains information abput thee marital status of individuals like unmarried, married, divorce, etc.\n- occupation: (string/object data type), categorical data. It contains information about what type of job or roles each individual has like manager, executive, etc. \n- relationship: (string/object data type), categorical data. It contains detials about the individuals relationship status like is he a wife, a husband, has a child, unmarried, etc. \n- race: (string/object data type), categorical data. It contains details about the individuals ethnicity - White, Black, Hispanic, etc.\n- sex: (renamed it to gender), categorical data. It mentions whether an individual is a mail or female.\n- capital-gain: (int data type), numeric data. This column contains the capital gain amount that an individual had. If they had none then it says 0. \n- capital-loss: (int data type), numeric data. This column contains the capital loss amount that an individual had. If they had none then it says 0. \n- hours-per-week: (int data type), numeric data. This column contains the amount of week each individual worked. \n- native-country: (string/object data type), categorical data. It tells us each indiviuals native country.\n- income: (string/ object data type), categorical data. It tells us whether an individuals salary is greater than or less than and equal to 50K dollar salary. 
This is our target value. ",
"_____no_output_____"
],
[
"## Data Preparation and Exploring data",
"_____no_output_____"
],
[
"### Import potential or necessary libraries",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"### Loading the dataset",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('adult.csv')",
"_____no_output_____"
]
],
[
[
"**Taking a look at the number of rows and columns**",
"_____no_output_____"
]
],
[
[
"print('Number of columns and rows: ', df.shape)",
"Number of columns and rows: (32561, 15)\n"
]
],
[
[
"**Getting more detailed information about the dataset like the columns, data types and all.**",
"_____no_output_____"
]
],
[
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 32561 entries, 0 to 32560\nData columns (total 15 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 age 32561 non-null int64 \n 1 workclass 32561 non-null object\n 2 fnlwgt 32561 non-null int64 \n 3 education 32561 non-null object\n 4 education.num 32561 non-null int64 \n 5 marital.status 32561 non-null object\n 6 occupation 32561 non-null object\n 7 relationship 32561 non-null object\n 8 race 32561 non-null object\n 9 sex 32561 non-null object\n 10 capital.gain 32561 non-null int64 \n 11 capital.loss 32561 non-null int64 \n 12 hours.per.week 32561 non-null int64 \n 13 native.country 32561 non-null object\n 14 income 32561 non-null object\ndtypes: int64(6), object(9)\nmemory usage: 3.7+ MB\n"
],
[
"df.head()",
"_____no_output_____"
]
],
[
[
"It seems that education number column was an integer identifier for the edication field. However, it was the categorical identification from 1 and not 0 like they do it in label encoding. Later, on we will need to do label encoding to convert catgorical to numericals for the classification algorithms. I will also be renaming the sex column to gender. ",
"_____no_output_____"
]
],
[
[
"df['education.num'].unique()",
"_____no_output_____"
],
[
"df = df.drop(columns={'education.num'})\ndf = df.rename(columns= {\"sex\": \"gender\"})\ndf.head()",
"_____no_output_____"
]
],
[
[
"## Data Cleaning and analyzing",
"_____no_output_____"
],
[
"We drop any duplicate calues if there may be any type of duplicate values. ",
"_____no_output_____"
]
],
[
[
"df = df.drop_duplicates()",
"_____no_output_____"
]
],
[
[
"We check for null values and it seems there are no nulls, but there are many quesstion marks in the dataset and they not be consideredd null values and in order to know how many missing values there are in each column, we would need to convert them to nulls. It would be essential to know which columns have null values to evaluate if they represent anything or can be removed or replaced.",
"_____no_output_____"
]
],
[
[
"df.isna().sum()",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
]
],
[
[
"We will be setting a null value in place of all question marks to represent missing values ",
"_____no_output_____"
]
],
[
[
"df = df.replace('?', np.NaN)\ndf.head()",
"_____no_output_____"
]
],
[
[
"Below, we are double checking null values and where they are. As we can notice the null values are in the workclass, occupation and native country field. It is possible that someone who may have recently been laid off or looking for a job may not have an employment at the time the census collected data and their salary may be 0. It coul also be that an individual did want to report their employemnt details by preference and same goes with the information regarding their native country. Therefore, we can rename it to unknown. ",
"_____no_output_____"
]
],
[
[
"df.isna().sum()",
"_____no_output_____"
]
],
[
[
"Some individuals may be unemployed at the time of census survey, could have been laid off, etc. Therefore, there workclass and occupation may not have anything on it, so we set it as unknown. Along with that, some may have preferred not to give information about their native countries and so, we set it as unknown. As it is a string categorical value, we did not set it to 0 to maintain consitency. Later on, label enconding will be used to convert these values to numeric for modelling.",
"_____no_output_____"
]
],
[
[
"df = df.fillna(\"unknown\")\ndf.head()",
"_____no_output_____"
]
],
[
[
"## Exploratory Data Analysis and Data Visualization",
"_____no_output_____"
],
[
"We conducted additional EDA and developed further data visualizations to get a better idea on each of the features used in the dataset, along with their distributions and the relation of key features to target value of income and count of specific values in a column. We also triued to rvaluate the correlation with our target valur or the value in the y-axis which is income in this case. ",
"_____no_output_____"
]
],
[
[
"df.hist(figsize=(16,20), bins=30)",
"_____no_output_____"
],
[
"sns.pairplot(df)",
"_____no_output_____"
]
],
[
[
"Below is the data of each of the marital status values and their respective calculations. Those who are married with a civil spouse exist in the highest count. Those whose spouse is absent is towards the lowest.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(15, 5))\nsns.countplot(x=\"marital.status\", data=df)",
"_____no_output_____"
]
],
[
[
"Below is the data of all of the educations obtained by each of the individuals and the count or the number of people for each of the educational qualification sections. It seems the individuals who have been **High School graduates** or have attended some levels of **college** are in the highest numbers and those who have attended up to **pre-school and elementary school** are in the lowest counts.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(17, 5))\nsns.countplot(x=\"education\", data=df)",
"_____no_output_____"
]
],
[
[
"Those individuals in the census, whose race is White is in highest numbers and infidicuals whose race is classified as others or American Indian is in the lowest numbers. It seems this column may also be playing a significant role in the modelling section. ",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(15, 5))\nsns.countplot(x=\"race\", data=df)",
"_____no_output_____"
]
],
[
[
"Below is a visualization taht is helping us understand how many individuals income status is divided based on each of the races. It seems mostly those in who have income less than or equal to 50k are in higher number and the diference between the folkss who have less than or equal to 50k and the folks who have more than 50k is significantly higher for folks who are white.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(10,7))\nplt.title('Income relation to race')\nsns.countplot(x='race', hue='income',data=df)",
"_____no_output_____"
]
],
[
[
"Below is a distribution plot of the age groups of all of the individuals who are in our dataset. It seems that the largest amount of individuals are below within their late teens to their 40's in age and it also seems that as it goes over the proportion of individuals who are above the age of 40 sowly goes down. ",
"_____no_output_____"
]
],
[
[
"sns.distplot(df['age'])",
"_____no_output_____"
]
],
[
[
"Below, is a distribution of all of the individuals and how many hours they are working. It seems that most of the individuals work **40** hours a week which should be considered **full time** jobs.",
"_____no_output_____"
]
],
[
[
"sns.distplot(df['hours.per.week'])",
"_____no_output_____"
]
],
[
[
"Below is a counting of how many male and female are in the dataset. It seems that there are more males than females in the dataset. ",
"_____no_output_____"
]
],
[
[
"plt.title('The count of male and female')\nsns.countplot(x='gender',data=df)",
"_____no_output_____"
]
],
[
[
"Below is a comparison of male and female and their salaary ranges. It seems that male have a higher overall salary and most of the individuals get **less than or equal to 50K** in comparison to **more than 50K**.",
"_____no_output_____"
]
],
[
[
"plt.title('Income relation to gender')\nsns.countplot(x='gender',hue='income',data=df)\n",
"_____no_output_____"
]
],
[
[
"In the visualization below, we can see that the individuals who get **more than 50K salary** seem to be working more than **40 hours a week**. And those who get **less than or equal to 50K salary** seem to be working **40 or less hours** in most of the cases.",
"_____no_output_____"
]
],
[
[
"sns.boxplot('income', y= 'hours.per.week', data= df)",
"_____no_output_____"
]
],
[
[
"## Prepare the data to better expose the underlying data patterns to ML algorithms",
"_____no_output_____"
],
[
"## Feature Engineering \n\nThis is the step where we will be converting categorical variables to numerical variables. These values will be used for modelling purposes. We will be using the label encoding techniques to conert the categorical (non-numeric) values into a specific numerical value and an unique numeric value will be assigned to a specific categorical value in order to represent it. If the dataset has 2 variables then it would replaced them with 0 and 1's and if they had more categorical variables than two then it would used more numbers to identify each of the specific categories. Two variables would be binary classification and more classes would be binary classification.",
"_____no_output_____"
],
[
"**Below we can see the count of each individual and their salary ranges.**",
"_____no_output_____"
]
],
[
[
"df['income'].value_counts()",
"_____no_output_____"
]
],
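[
[
"# Hedged equivalent (not in the original notebook): value_counts(normalize=True)\n# returns the same class ratios without typing the raw counts by hand.\ndf['income'].value_counts(normalize=True)",
"_____no_output_____"
]
],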
[
[
"Below is the ratio of eeach of our categories and it seems that the ratio of individuals whose salary is higher than 50K is a higher ratio than those who have a salary of lower than 50K.\n\n- Problem: Given a set of informatioon about each individual in the census, we are trying to figure out whther their salary exceeds 50K or it does not and according ot the business rule those who would be euqal to or less than 50K would be eligible or specific government benefits.\n\n- Model Goal: Our main goal here is to develop a model that is higher than an accuracy score of 75%.",
"_____no_output_____"
],
[
"**Below is our accuracy score or the score of indivuals who earn more than 50k and the proportion.**",
"_____no_output_____"
]
],
[
[
"24698/(24698+7839)",
"_____no_output_____"
]
],
[
[
"**Below is our score of indivuals who earn more than 50k and its proportion and the score of indivuals who earn less then 50K salary and their proportion.**",
"_____no_output_____"
]
],
[
[
"print(\"Ratio of salary greater than 50K:\", 24698/(24698+7839))\nprint(\"Ratio of salary lower than 50K:\", 7839/(24698+7839))",
"Ratio of salary greater than 50K: 0.7590742846605403\nRatio of salary lower than 50K: 0.2409257153394597\n"
]
],
[
[
"We make a copy of the dataframe just incase. We will be moving on to the step where we use label encoding to replace categorical values in wordings to numerical values. ",
"_____no_output_____"
]
],
[
[
"df_new = df.copy()\ndf_new.head()",
"_____no_output_____"
]
],
[
[
"Importing the Label Encoder Library from Scikit Learn. In the step, we will be converting all categorical columns to numeric. ",
"_____no_output_____"
]
],
[
[
"from sklearn.preprocessing import LabelEncoder",
"_____no_output_____"
]
],
[
[
"Below, we are using the technique to fit the label ecoder to a specific column value and then we apppply the transformations where they will take each of the values in the categorical column and it would assign a numeric value to it. We do those steps for 8 of the categorical columns. ",
"_____no_output_____"
]
],
[
[
"le = LabelEncoder()\nle.fit(df_new['income'])\ny = le.transform(df_new['income'])\ndf_new['income'] = y",
"_____no_output_____"
],
[
"le.fit(df['workclass'])\nx1 = le.transform(df_new['workclass'])\ndf_new['workclass'] = x1",
"_____no_output_____"
],
[
"le.fit(df['marital.status'])\nx2 = le.transform(df_new['marital.status'])\ndf_new['marital.status'] = x2",
"_____no_output_____"
],
[
"le.fit(df['occupation'])\nx3 = le.transform(df_new['occupation'])\ndf_new['occupation'] = x3",
"_____no_output_____"
],
[
"le.fit(df['relationship'])\nx4 = le.transform(df_new['relationship'])\ndf_new['relationship'] = x4",
"_____no_output_____"
],
[
"le.fit(df['race'])\nx5 = le.transform(df_new['race'])\ndf_new['race'] = x5",
"_____no_output_____"
],
[
"le.fit(df['gender'])\nx6 = le.transform(df_new['gender'])\ndf_new['gender'] = x6",
"_____no_output_____"
],
[
"le.fit(df['native.country'])\nx7 = le.transform(df_new['native.country'])\ndf_new['native.country'] = x7",
"_____no_output_____"
],
[
"le.fit(df['education'])\nx8 = le.transform(df_new['education'])\ndf_new['education'] = x8",
"_____no_output_____"
]
],
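[
[
"# Hedged, more compact equivalent of the encoding cells above (not in the original\n# notebook): fitting a fresh LabelEncoder per column keeps fitted state from\n# leaking between columns.\ncat_cols = ['income', 'workclass', 'marital.status', 'occupation',\n            'relationship', 'race', 'gender', 'native.country', 'education']\nfor col in cat_cols:\n    df_new[col] = LabelEncoder().fit_transform(df[col])",
"_____no_output_____"
]
],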
[
[
"Below is out updated dataset with each of the categorical columns converted into numerical columns and we will be using this data for modelling. ",
"_____no_output_____"
]
],
[
[
"df_new.head()",
"_____no_output_____"
]
],
[
[
"### Data Preparation and Transformation for Modelling",
"_____no_output_____"
],
[
"We would intially split the data into training and testing data. The training dataset is what will be used for now and the testing dataset will be used for later after the models are developed, we use it to test the values in the model.",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split",
"_____no_output_____"
]
],
[
[
"We will be taking every column in x except the the income column which is the target column.",
"_____no_output_____"
]
],
[
[
"x = df_new.drop(['income'], axis = 1)\ny = df_new['income']",
"_____no_output_____"
],
[
"x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 1)",
"_____no_output_____"
]
],
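[
[
"# Hedged sanity check (not in the original notebook): an 80/20 split of roughly\n# 32.5K rows should leave about 26K training rows and 6.5K testing rows.\nprint(x_train.shape, x_test.shape)",
"_____no_output_____"
]
],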
[
[
"### Scaling Data",
"_____no_output_____"
],
[
"We are scaling the our data size to ensure that they are all in the same range of numbers or scaled into that range. Below, we import a scaler and declare a standard scaler or instantiate it. ",
"_____no_output_____"
]
],
[
[
"from sklearn.preprocessing import StandardScaler\nstd_scaler = StandardScaler()",
"_____no_output_____"
]
],
[
[
"We are fitting our scaler to the training dataset and using the transformation features to create a scaling of the training and testing datasets. ",
"_____no_output_____"
]
],
[
[
"std_scaler.fit(x_train)\nx_train_scaled = std_scaler.transform(x_train)\nx_test_scaled = std_scaler.transform(x_test)",
"_____no_output_____"
]
],
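[
[
"# Hedged sanity check (not in the original notebook): after standard scaling, each\n# training feature should have mean ~0 and standard deviation ~1.\nprint(x_train_scaled.mean(axis=0).round(3))\nprint(x_train_scaled.std(axis=0).round(3))",
"_____no_output_____"
]
],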
[
[
"## Explore many different models and short-list the best ones",
"_____no_output_____"
],
[
"## Modeling & Model Evaluation",
"_____no_output_____"
],
[
"This is where we are devloping our models. We will be fitting our training sets into the model for the x scaled training set and the y training data which contains the target. Initially, we will be importing respective libraries and then we would need to create or instantiate a logistic regression model. We will also be evaluating the accuracy score of the model and a prediction which will set what values the x scaled ddatas will predict. Following that step, we would use a confusion matrix to see the accuracy and check for details like true positive (when predcition and actual results are in the positive side or saying no risk), true negative, (when the prediction and actual data both say that things are in the negative side or you are in the risk zone for our project, false positive (when we predict things are on positive side, but in reality they may not be on that side), false negative (when we predict things are on the negative side, but in reality they are on the positive side). \n\nAfter al of that is over, we find the cross validation to accurately depict if out models accuracy is correct or not. We see the mean +/- stnd. deviation.\n\nFollowing, the logistic regression tests, we would also test the datasets using the decision tree model and random forest and compare our results.",
"_____no_output_____"
],
[
"#### Logistics Regression",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import LogisticRegression \nlogreg = LogisticRegression(penalty = 'none', random_state = 1) \nlogreg.fit(x_train_scaled, y_train) \npredictions = logreg.predict(x_train_scaled)\nscore = logreg.score(x_train_scaled, y_train)",
"_____no_output_____"
]
],
[
[
"In the cell below, we are developing confusion matrix for logistics regression to show the details all predicted datas vs actual data. We look at which one are predicted to be true and actually true, which one is predicted but now we evaluate where the predictions match with the accurate results.",
"_____no_output_____"
]
],
[
[
"from sklearn import metrics\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score\n\ncm = metrics.confusion_matrix(y_train, predictions)\ncm",
"_____no_output_____"
]
],
[
[
"Below, we are displaying a visualization of the Confusion matrix for the training datasets",
"_____no_output_____"
],
[
"In the confusion matrix blow, we notice that the score is about 80% accurate. This evaluates that it would be correct about 80 percent of the time, but approximately 20% of the time it would be incorrect in the values it gives us. It seems that the number of true negatives arre significantly higher than the others and it is good to predict somethin as bad when it actually may be negative. The false positive and false negative cases could also indicate some concern. False negative cases are significnatly higher than the true positive cases.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(9,9))\nsns.heatmap(cm, annot=True, fmt=\".3f\", linewidths=.5, square = True, cmap = 'Pastel1')\nplt.ylabel('Actual label');\nplt.xlabel('Predicted label');\nall_sample_title = 'Accuracy Score for training data: {0}'.format(score)\nplt.title(all_sample_title, size = 15);\nplt.show()",
"_____no_output_____"
]
],
[
[
"**Cross Validation** for Logistics Regression",
"_____no_output_____"
],
[
"In this step, we are cross validating out model. We are doing 5 splittings. The x values are set as the scaled training dataset and the y values are set as the also we for it to return training score and return the estimator. The verbose is 2 for the machine to show us what is going on. ",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import cross_validate",
"_____no_output_____"
],
[
"cv_fivefold = cross_validate(estimator= logreg, \n X = x_train_scaled,\n y = y_train,\n cv = 5,\n return_train_score= True, \n return_estimator= True, \n verbose = 2)",
"[CV] ................................................................\n[CV] ................................................. , total= 0.0s\n[CV] ................................................................\n[CV] ................................................. , total= 0.0s\n[CV] ................................................................\n[CV] ................................................. , total= 0.0s\n[CV] ................................................................\n[CV] ................................................. , total= 0.0s\n[CV] ................................................................\n[CV] ................................................. , total= 0.0s\n"
]
],
[
[
"This is the cross validation data of our training and testing scores.",
"_____no_output_____"
]
],
[
[
"print(cv_fivefold['train_score'])\nprint(cv_fivefold['test_score'])",
"[0.80146953 0.80358258 0.8036306 0.80199779 0.80253554]\n[0.80733769 0.79600461 0.79984633 0.80656934 0.80230548]\n"
]
],
[
[
"Below, we are looking for the mean of the validation and the standard deviation of it to verify the overall model accuracy. This is the overall summary of logistic regressions without any type of regularization. Our mean is around 80% and there is a standard devaition of about 0.004. Thi is a fairly decent score",
"_____no_output_____"
]
],
[
[
"validation_mean = cv_fivefold['test_score'].mean()\n\nvalidation_std = cv_fivefold['test_score'].std()\n\nprint('Logistic Regression 5-fold cv results (Accuracy) %.3f =/- %.3f'%(validation_mean, validation_std))",
"Logistic Regression 5-fold cv results (Accuracy) 0.802 =/- 0.004\n"
]
],
[
[
"**Below are the accuracy, precision and recall scores of the model for Logistic Regression.**",
"_____no_output_____"
],
[
"- The accuracy score is calculated by dividing the correct prediction amount by the number of total cases.\n- Then the precision score is calculated by the number of true positives by the number of condition positives (sum of the true positive and alse negative). \n- The recall score is the division of the true positives with the number of predicted positives (sum of true and false postives).",
"_____no_output_____"
]
],
[
[
"print('Log reg accruacy score:', accuracy_score(y_train, predictions))\nprint('Log reg precision score:', precision_score(y_train, predictions))\nprint('Log reg recall score:', recall_score(y_train, predictions))",
"Log reg accruacy score: 0.8028352990894772\nLog reg precision score: 0.6951983298538622\nLog reg recall score: 0.3194755356571794\n"
]
],
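[
[
"As a sanity check, these three scores can be recomputed directly from the confusion matrix entries. A minimal sketch, assuming the `cm` computed above (scikit-learn orders a binary confusion matrix as [[TN, FP], [FN, TP]]):\n\n```python\ntn, fp, fn, tp = cm.ravel()\nprint('accuracy :', (tp + tn) / (tp + tn + fp + fn))\nprint('precision:', tp / (tp + fp))\nprint('recall   :', tp / (tp + fn))\n```",
"_____no_output_____"
]
],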
[
[
"According to the results above, our recall score is fairly lower as most of our data was in the negative sections and there were more true negatives than true positives. ",
"_____no_output_____"
],
[
"### Decision tree",
"_____no_output_____"
],
[
"Decision tree will take the predictor space and break it into a number of different regions. We would break the individual datasets in the columns to smaller parts like if we have a dataset column calculating the income. We would evalaute if the income is larger than a specific number and then we would continue to the next node of the tree from the root node if it is. Then we would continue to the terminal node from root node. If not and we find a condition that is not further divideable we will stop. In our example, I set a maximum leaf nodes the decision tree can make is set to 15 for experimental purposes as I feel we have many binary classification, multiclass classification and the dataset is fairly large, so I felt 15 max nodes would allow it to find sufficient conditions.",
"_____no_output_____"
],
[
"We are importing the decision tree model and instantiating it. After that, we are trying to fit our scaled x training and y training datasets. The decision tree has a root node where it would have a conditions based on your dataset and it would move to the next node based on the condition. Like if someones is of a specific race then go to node a or b. It would continue till all conditions are satisfied and no nodes remaining. Below, we also find a prediction value and an accruacy score for the model. The score will be calculated by taking the x scaled values training and the y training values. Then the prediction will only store values predicted after seeing the x scaled training dataset. ",
"_____no_output_____"
]
],
[
[
"from sklearn.tree import DecisionTreeClassifier",
"_____no_output_____"
],
[
"clf = DecisionTreeClassifier(max_leaf_nodes=15, random_state = 1)\nclf.fit(x_train_scaled, y_train)\nscore_dt = clf.score(x_train_scaled, y_train)\nprediction_dt = clf.predict(x_train_scaled)",
"_____no_output_____"
]
],
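[
[
"To see the kind of threshold rules the tree actually learned, scikit-learn can print them as text. A short sketch, assuming `clf` from above and that `x_train` is still a dataframe whose columns name the features:\n\n```python\nfrom sklearn.tree import export_text\n\n# show only the first few levels to keep the printout readable\nprint(export_text(clf, feature_names=list(x_train.columns), max_depth=3))\n```",
"_____no_output_____"
]
],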
[
[
"Below, we have developed our confusion matrix that takes the y training data set and the predictions made from the x scaled training data and it will show us the value comparison of what was correct and what not.",
"_____no_output_____"
]
],
[
[
"cm_dt = metrics.confusion_matrix(y_train, prediction_dt)\ncm_dt",
"_____no_output_____"
]
],
[
[
"Below, we have made the confusion matrix into a visualization format. IWe can see that the score is about 84%. This score shows that the decision tree seems to do better than the linear regression model. This time like the previous one, we can see there are more actual negatives than the positive, but the difference between false negative and tru positive is not as high.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(9,9))\nsns.heatmap(cm_dt, annot=True, fmt=\".3f\", linewidths=.5, square = True, cmap = 'Pastel1')\nplt.ylabel('Actual label');\nplt.xlabel('Predicted label');\nall_sample_title = 'Accuracy Score for training data: {0}'.format(score_dt)\nplt.title(all_sample_title, size = 15);\nplt.show()",
"_____no_output_____"
]
],
[
[
"**Cross Validation** for Decision tree\n\nWe are doing 5 splittings when we are doing cross validations. We set the x values as scaled training values, set the y as y training value also we for it to return training score and return the estimator. We set the verbose is 2 for the machine to show us what is going on. ",
"_____no_output_____"
]
],
[
[
"cv_fivefold_dt = cross_validate(estimator= clf, \n X = x_train_scaled,\n y = y_train,\n cv = 5,\n return_train_score= True, \n return_estimator= True, \n verbose = 2)",
"[Parallel(n_jobs=1)]: Using backend SequentialBackend with 1 concurrent workers.\n[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s\n"
]
],
[
[
"Below are the cross validation scores for our training and testing scores.",
"_____no_output_____"
]
],
[
[
"print(cv_fivefold_dt['train_score'])\nprint(cv_fivefold_dt['test_score'])",
"[0.84483504 0.84569947 0.84521923 0.84699611 0.84690741]\n[0.84421821 0.84594698 0.84863619 0.84805993 0.83957733]\n"
]
],
[
[
"Over here, we find the mean of the validation and the standard deviation to verify the overall model accuracy. This is our baseline summary of decision tree model. Our mean is around 84.5 and there is a standard devaition of about 0.003 and this shows a better one than the log regression.",
"_____no_output_____"
]
],
[
[
"dt_validation_mean = cv_fivefold_dt['test_score'].mean()\ndt_validation_std = cv_fivefold_dt['test_score'].std()\nprint('Decision tree 5-fold cv results (Accuracy) %.3f =/- %.3f'%(dt_validation_mean, dt_validation_std))",
"Decision tree 5-fold cv results (Accuracy) 0.845 =/- 0.003\n"
]
],
[
[
"In the example below, we can see the accuracy, recall and the precision scores. As the negative actuals were higher than postives again, we see a low recall score, but the accuracy and precision scre are fairly high. Our scores are overall higher than the logistic regression ones.",
"_____no_output_____"
]
],
[
[
"print('Dec tree accruacy score:', accuracy_score(y_train, prediction_dt))\nprint('Dec tree precision score:', precision_score(y_train, prediction_dt))\nprint('Dec tree recall score:', recall_score(y_train, prediction_dt))",
"Dec tree accruacy score: 0.8457489723001268\nDec tree precision score: 0.788308009271182\nDec tree recall score: 0.48944675407739047\n"
]
],
[
[
"### Random Forest\n\nThere sample using a concept of bootstrapping where they will resampling of the data. They will look into the dataset and will resample the data based on where they will randomly choose data ffrom your original data set and replace it. Like they may place it in a different order or a specific data may be used twoce or more times. The different data sets can be used like a validations. By cahnging datasets we get different entries and we can get the average. I used this as th context seemed to be helpful in understanding how our dtaset can be resampled while modelling. The random forest will randomly sample observations with bootstrapping, but it will only show some parts of the predictor and not all of it. You will not see all dataset and this helps resolve the variance issue in bootstrapping.",
"_____no_output_____"
],
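[
"Bootstrapping itself is easy to demonstrate in isolation. A minimal sketch on a made-up array (purely for illustration, not our census data):\n\n```python\nimport numpy as np\n\nrng = np.random.default_rng(0)\ndata = np.array([1, 2, 3, 4, 5])\n# sampling with replacement: some values repeat, others are left out\nprint(rng.choice(data, size=len(data), replace=True))\n```",
"_____no_output_____"
],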
[
"We import the Random Forest Classifier from the Scikit Learn.Ensemble method as it ensembles many trees. The instantiation n_estimator parameter shows us how many trees we eill fit and each tree shows a bootstrapped sample of data and we set it to 300. I set every tree to at most 10 depths as it is a large dataset and it would be a good testing purpose. Following the import and instantiate, we are trying to fit our x scales training data and y training data into the random forest model and then we derive the predictions based on the x training data and the scores based on the scaled x training and the y training data.",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import RandomForestClassifier",
"_____no_output_____"
],
[
"rf = RandomForestClassifier(n_estimators = 300, max_features='auto', max_depth= 10)\nrf.fit(x_train_scaled, y_train)\npredictions_rf = rf.predict(x_train_scaled)\nscore_rf = rf.score(x_train_scaled, y_train)",
"_____no_output_____"
]
],
[
[
"In the cell below, we are developing confusion matrix for the random forest to show the details of how many of the datas are predicted to be true and actually true, how many are not like that and it would show how many of the actual numbers matched with your prediction and how many did not.",
"_____no_output_____"
]
],
[
[
"cm_dt = metrics.confusion_matrix(y_train, predictions_rf)\ncm_dt",
"_____no_output_____"
]
],
[
[
"Below: We are displaying a visualization of the Confusion matrix for the training datasets. In the confusion matrix below, we notice that the score is about 87% accurate. This indicates it is mostly correct however about 13% of the time it is inaccurate. However, the accuracy score of this model is higher than the scores of the Logistic Regression and the Decision Tree model. This time the true negatives are highest, but the true postives are higher than the false postives unlike the other models.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(9,9))\nsns.heatmap(cm_dt, annot=True, fmt=\".3f\", linewidths=.5, square = True, cmap = 'Pastel1')\nplt.ylabel('Actual label');\nplt.xlabel('Predicted label');\nall_sample_title = 'Accuracy Score for training data: {0}'.format(score_rf)\nplt.title(all_sample_title, size = 15);\nplt.show()",
"_____no_output_____"
]
],
[
[
"**Cross Validation** for Random Forest\n\nWe are doing 5 splittings when we are doing cross validations. We set the x values as scaled training values, set the y as y training value also we for it to return training score and return the estimator. We set the verbose is 2 for the machine to show us what is going on. ",
"_____no_output_____"
]
],
[
[
"cv_fivefold_rf = cross_validate(estimator= rf, \n X = x_train_scaled,\n y = y_train,\n cv = 5,\n return_train_score= True, \n return_estimator= True, \n verbose = 2)",
"[Parallel(n_jobs=1)]: Using backend SequentialBackend with 1 concurrent workers.\n"
]
],
[
[
"Below are the cross validation scores for our training and testing scores. It is higher than the scores for the Logisti Regression and Decision Tree.",
"_____no_output_____"
]
],
[
[
"print(cv_fivefold_rf['train_score'])\nprint(cv_fivefold_rf['test_score'])",
"[0.87316909 0.87648274 0.87465783 0.87288095 0.8770169 ]\n[0.86957357 0.85631963 0.85631963 0.86650019 0.85494717]\n"
]
],
[
[
"Over here, we find the mean of the validation and the standard deviation to verify the overall model accuracy. This is our baseline summary of decision tree model. Our mean is around 86.1 and there is a standard devaition of about 0.006 and this shows a better one than the log regression and Decision Tree.",
"_____no_output_____"
]
],
[
[
"validation_mean = cv_fivefold_rf['test_score'].mean()\n\nvalidation_std = cv_fivefold_rf['test_score'].std()\n\nprint('Random Forest 5-fold cv results (Accuracy) %.3f =/- %.3f'%(validation_mean, validation_std))",
"Random Forest 5-fold cv results (Accuracy) 0.861 =/- 0.006\n"
]
],
[
[
"In the example below, we can see the accuracy, recall and the precision scores. As the we definitely had a lot of actual negatives the recall score is higher, but also we had a good amount of positives, so the recall score is fairly higher than the recall score for the log reg and decision tree which were in their 30 and 40s",
"_____no_output_____"
]
],
[
[
"print('Random Forest accruacy score:', accuracy_score(y_train, predictions_rf))\nprint('Random Forest precision score:', precision_score(y_train, predictions_rf))\nprint('Random Forest recall score:', recall_score(y_train, predictions_rf))",
"Random Forest accruacy score: 0.8736409389527066\nRandom Forest precision score: 0.835179742256387\nRandom Forest recall score: 0.5906619763351455\n"
]
],
[
[
"As of now, it seems to me that the **Random Forest model has been performing the best** compared to **Logistic Regression** and **Decision Tree** model. However, we will be working on model tuning wehre we try to enhnce the model and aim to improve the overall scores. After tuning, we could make a call.",
"_____no_output_____"
],
[
"## Fine-tune your models",
"_____no_output_____"
],
[
"In this step, we will be fine tuning the model or trying to improve it or make it better. I plan on removing major outliers from specific columnss and evaluating how each of the models will be performing. I am comparing the Logistic Regression, Decision Tree and the Random Forest models after removing outliers with the original models with all the datas.",
"_____no_output_____"
],
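[
"As a quick aid for spotting such rare levels, the share of each category can be listed directly. A hedged sketch, assuming `df_new` from above and an arbitrary 2% threshold chosen only for illustration:\n\n```python\nrare = df_new['education'].value_counts(normalize=True)\nprint(rare[rare < 0.02])  # levels that make up under 2% of the rows\n```",
"_____no_output_____"
],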
[
"**Further fine-tuning: Checking the values after removing the outliers.**",
"_____no_output_____"
],
[
"It seems the categorical vlue of 4 is the outlier amongst all of the other races. If we recall from earlier, individuals whose race is white are significantly higher in count than all other races and it seems they are an outlier in this case. ",
"_____no_output_____"
]
],
[
[
"sns.countplot(x=\"race\", data=df_new)",
"_____no_output_____"
]
],
[
[
"It seems the categorical value of 1 and 3 seem to be fairly much shorter than the others and we will be removing those as they seem to be the outliers here. ",
"_____no_output_____"
]
],
[
[
"sns.countplot(x=\"marital.status\", data=df_new)",
"_____no_output_____"
]
],
[
[
"As we can see in the example below, those individuals who have the education qualification in 3, 10 and 13 seem to be much loser than the rest. In that case, I will be removing those columns or the field with those columns and then we will be running all of the models.",
"_____no_output_____"
]
],
[
[
"sns.countplot(x=\"education\", data=df_new)",
"_____no_output_____"
]
],
[
[
"There is a mistake for the first one we need to remov number 4 as that is the outlier and for second one we need to remove 1 andd 3 in marital status",
"_____no_output_____"
]
],
[
[
"index = df_new[(df_new['race'] == 4)|(df_new['marital.status'] == 1) | (df_new['marital.status'] == 3)\n |(df_new['education'] == 3)|(df_new['education'] == 10) | (df_new['education'] == 13)].index\ndf_new.drop(index, inplace=True)\n",
"_____no_output_____"
]
],
[
[
"We will be copying the updated dataframe with outlier removal just incase. ",
"_____no_output_____"
]
],
[
[
"df2 = df_new.copy()",
"_____no_output_____"
],
[
"df2.head()",
"_____no_output_____"
]
],
[
[
"**In this step,** we willbe preparing our data and getting those ready for modelling and data transformation once again. In the updated dataset without the outliers and set the x values to all other features except the target feature of income and the y is set to the target feature of income.",
"_____no_output_____"
]
],
[
[
"x2 = df2.drop(['income'], axis = 1)\ny2 = df2['income']",
"_____no_output_____"
]
],
[
[
"Below, we are splitting the data into x and y training datasets. ",
"_____no_output_____"
]
],
[
[
"x_tr, x_ts, y_tr, y_ts = train_test_split(x2, y2, test_size = 0.2, random_state = 1)",
"_____no_output_____"
]
],
[
[
"Below, we are **scaling** our x dataset to ensure that they are all in the same range of numbers or scaled into that range. Below, we import a scaler and declare a standard scaler or instantiate it.",
"_____no_output_____"
]
],
[
[
"std_scaler.fit(x_tr)\nx_tr_scaled = std_scaler.transform(x_tr)\nx_ts_scaled = std_scaler.transform(x_ts)",
"_____no_output_____"
]
],
[
[
"We will be instantiating our logistic regression model with the updated data sets training and testing set. We fit it with the scaled x training dataset andd the y training dataset.",
"_____no_output_____"
]
],
[
[
"lr = LogisticRegression(penalty = 'none', random_state = 1) \nlr.fit(x_tr_scaled, y_tr)",
"_____no_output_____"
]
],
[
[
"We are setting our accuracy score of the model by taking the scaled x training set and the y training set. Following that step, we are making a prediction by only using the scaled x training set. ",
"_____no_output_____"
]
],
[
[
"score2 = lr.score(x_tr_scaled, y_tr)\npred2 = lr.predict(x_tr_scaled)",
"_____no_output_____"
]
],
[
[
"In the confusion matrix below, we notice that the score is about 87% accurate. It was 80% in with the outliers. This evaluates that it would be correct about 87 percent of the time, but approximately 13% of the time it would be incorrect in the values it gives us. In this case, we still see the true negative numbers are the highest, but the true postiive is larger than the true negative and that is the exact opposite of what we had noticed when we were modelling it with the outliers.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(9,9))\nsns.heatmap(cm_dt, annot=True, fmt=\".3f\", linewidths=.5, square = True, cmap = 'Pastel1')\nplt.ylabel('Actual label');\nplt.xlabel('Predicted label');\nall_sample_title = 'Accuracy Score for training data: {0}'.format(score2)\nplt.title(all_sample_title, size = 15);\nplt.show()",
"_____no_output_____"
]
],
[
[
"**Cross Validation** for Logistics Regression without outliers\n\nIn this step, we are cross validating out model. We are doing 5 splittings. The x values are set as the scaled training dataset in the updated data set without outliers and the y values are set as the also we for it to return training score and return the estimator. The verbose is 2 for the machine to show us what is going on. ",
"_____no_output_____"
]
],
[
[
"cv_fivefold_lr2 = cross_validate(estimator= lr, \n X = x_tr_scaled,\n y = y_tr,\n cv = 5,\n return_train_score= True, \n return_estimator= True, \n verbose = 2)",
"[CV] ................................................................\n[CV] ................................................. , total= 0.0s\n[CV] ................................................................\n[CV] ................................................. , total= 0.0s\n[CV] ................................................................\n[CV] ................................................. , total= 0.0s\n[CV] ................................................................\n[CV] ................................................. , total= 0.0s\n[CV] ................................................................\n[CV] ................................................. , total= 0.0s\n"
]
],
[
[
"This is the cross validation data of our training and testing scores. The sscores are better than theee cross validations for the model without outliers. ",
"_____no_output_____"
]
],
[
[
"print(cv_fivefold_lr2['train_score'])\nprint(cv_fivefold_lr2['test_score'])",
"[0.87262686 0.88022092 0.87646653 0.87957212 0.88336784]\n[0.89241379 0.87448276 0.88259669 0.87707182 0.86187845]\n"
]
],
[
[
"Below, we are looking for the mean of the validation and the standard deviation of it to verify the overall model accuracy. This is the overall summary of logistic regressions without outliers. Our mean is around 84.5% and there is a standard devaition of about 0.003. This score is better than the 80% score range.",
"_____no_output_____"
]
],
[
[
"lr2_validation_mean = cv_fivefold_dt['test_score'].mean()\nlr2_validation_std = cv_fivefold_dt['test_score'].std()\nprint('Logistic regression without outliers 5-fold cv results (Accuracy) %.3f =/- %.3f'%(lr2_validation_mean, lr2_validation_std))",
"Logistic regression without outliers 5-fold cv results (Accuracy) 0.845 =/- 0.003\n"
]
],
[
[
"Below, are our scores and we are comparing the scores after outlier removal with the ones when the outlier wass there. As we can see, the accuracy and the precision score is better without outliers than the ones with outliers. On the other hand, the recall score is around the same range.",
"_____no_output_____"
]
],
[
[
"print('Log reg accruacy score without ouliers:', accuracy_score(y_tr, pred2))\nprint('Log reg precision score without ouliers:', precision_score(y_tr, pred2))\nprint('Log reg recall score without ouliers:', recall_score(y_tr, pred2))",
"Log reg accruacy score without ouliers: 0.8787962451684153\nLog reg precision score without ouliers: 0.7456896551724138\nLog reg recall score without ouliers: 0.3128390596745027\n"
],
[
"print('Log reg accruacy score:', accuracy_score(y_train, predictions))\nprint('Log reg precision score:', precision_score(y_train, predictions))\nprint('Log reg recall score:', recall_score(y_train, predictions))",
"Log reg accruacy score: 0.8028352990894772\nLog reg precision score: 0.6951983298538622\nLog reg recall score: 0.3194755356571794\n"
]
],
[
[
"We will be instantiating our decision tree model with the updated data sets training and testing set. We fit it with the scaled x training dataset andd the y training dataset. We are also setting our accuracy score of the model by taking the scaled x training set and the y training set. Following that step, we are making a prediction by only using the scaled x training set. ",
"_____no_output_____"
]
],
[
[
"dt = DecisionTreeClassifier(max_leaf_nodes=15, random_state = 1)\ndt.fit(x_tr_scaled, y_tr)\ndt_sc = dt.score(x_tr_scaled, y_tr)\ndt_pred = dt.predict(x_tr_scaled)",
"_____no_output_____"
]
],
[
[
"In the confusion matrix below, we notice that the score is about 89% accurate. It was about 84% in with the outliers. This evaluates that it would be correct about 89 percent of the time, but approximately 11% of the time it would be incorrect in the values it gives us. In this case the true positive is higher than the false positive which was not the case when we were modelling it with all values including outliers. However, the true negatives are higher than true positives. This score is higher than the logistic regression score even without removing the outliers.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(9,9))\nsns.heatmap(cm_dt, annot=True, fmt=\".3f\", linewidths=.5, square = True, cmap = 'Pastel1')\nplt.ylabel('Actual label');\nplt.xlabel('Predicted label');\nall_sample_title = 'Accuracy Score for training data: {0}'.format(dt_sc)\nplt.title(all_sample_title, size = 15);\nplt.show()",
"_____no_output_____"
]
],
[
[
"**Cross Validation** for Decision tree\n\nWe are doing 5 splittings when we are doing cross validations. We set the x values as scaled training values in the updated data set. Then we set the y as y training value also we for it to return training score and return the estimator. We set the verbose is 2 for the machine to show us what is going on. ",
"_____no_output_____"
]
],
[
[
"cv_fivefold_dt2 = cross_validate(estimator= dt, \n X = x_tr_scaled,\n y = y_tr,\n cv = 5,\n return_train_score= True, \n return_estimator= True, \n verbose = 2)",
"[CV] ................................................................\n[CV] ................................................. , total= 0.0s\n[CV] ................................................................\n[CV] ................................................. , total= 0.0s\n[CV] ................................................................\n[CV] ................................................. , total= 0.0s\n[CV] ................................................................\n[CV] ................................................. , total= 0.0s\n[CV] ................................................................\n[CV] ................................................. , total= 0.0s\n"
]
],
[
[
"Below are the cross validation scores for our training and testing scores. These scores are better than our scores with all the data including outliers.",
"_____no_output_____"
]
],
[
[
"print(cv_fivefold_dt2['train_score'])\nprint(cv_fivefold_dt2['test_score'])",
"[0.89161201 0.900932 0.89544513 0.89855072 0.8957902 ]\n[0.85793103 0.88689655 0.88950276 0.87707182 0.87016575]\n"
]
],
[
[
"Over here, we find the mean of the validation and the standard deviation to verify the overall model accuracy. This is our baseline summary of decision tree model. Our mean is around 87.6 and there is a standard devaition of about 0.012. The overall score is better than the one without the outliers for decision tree. ",
"_____no_output_____"
]
],
[
[
"dt2_validation_mean = cv_fivefold_dt2['test_score'].mean()\ndt2_validation_std = cv_fivefold_dt2['test_score'].std()\nprint('Decision tree without outliers 5-fold cv results (Accuracy) %.3f =/- %.3f'%(dt2_validation_mean, dt2_validation_std))",
"Decision tree without outliers 5-fold cv results (Accuracy) 0.876 =/- 0.012\n"
]
],
[
[
"In the example below, we can see the accuracy, recall and the precision scores after removing outliers and we are comparing it with the scores that had no outliers and all of the values were present. The overall scores are much higher in all three of the cases. The accuracy recall and precision scores are higher once we have removed the outliers.",
"_____no_output_____"
]
],
[
[
"print('Dec tree accruacy score without ouliers:', accuracy_score(y_tr, dt_pred))\nprint('Dec tree precision score without ouliers:', precision_score(y_tr, dt_pred))\nprint('Dec tree recall score without ouliers:', recall_score(y_tr, dt_pred))",
"Dec tree accruacy score without ouliers: 0.8942573163997791\nDec tree precision score without ouliers: 0.7083333333333334\nDec tree recall score without ouliers: 0.5226039783001808\n"
],
[
"print('Dec tree accruacy score:', accuracy_score(y_train, prediction_dt))\nprint('Dec tree precision score:', precision_score(y_train, prediction_dt))\nprint('Dec tree recall score:', recall_score(y_train, prediction_dt))",
"Dec tree accruacy score: 0.8457489723001268\nDec tree precision score: 0.788308009271182\nDec tree recall score: 0.48944675407739047\n"
]
],
[
[
"We will be instantiating our random forest model with the updated data sets training and testing set. We fit it with the scaled x training dataset and the y training dataset. We are also setting our accuracy score of the model by taking the scaled x training set and the y training set. Following that step, we are making a prediction by only using the scaled x training set.",
"_____no_output_____"
]
],
[
[
"rf2 = RandomForestClassifier(n_estimators = 300, max_features='auto', max_depth= 10)\nrf2.fit(x_tr_scaled, y_tr)\nrf2_sc = rf2.score(x_tr_scaled, y_tr)\nrf2_pred = rf2.predict(x_tr_scaled)",
"_____no_output_____"
]
],
[
[
"In the confusion matrix below, we notice that the score is about 93% accurate. It was about 87% in with the outliers. This evaluates that it would be correct about 93 percent of the time, but approximately 7% of the time it would be incorrect in the values it gives us. In this case the true positive is higher than the false positiveonce again. However, the true negatives cases are the highest this time also. This model performed better than the logistic regressiona nd the deicision tree model. ",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(9,9))\nsns.heatmap(cm_dt, annot=True, fmt=\".3f\", linewidths=.5, square = True, cmap = 'Pastel1')\nplt.ylabel('Actual label');\nplt.xlabel('Predicted label');\nall_sample_title = 'Accuracy Score for training data: {0}'.format(rf2_sc)\nplt.title(all_sample_title, size = 15);\nplt.show()",
"_____no_output_____"
]
],
[
[
"**Cross Validation** for Random Forest\n\nWe are doing 5 splittings when we are doing cross validations. We set the x values as scaled training values in the updated data set. Then we set the y as y training value also we for it to return training score and return the estimator. We set the verbose is 2 for the machine to show us what is going on. ",
"_____no_output_____"
]
],
[
[
"cv_fivefold_rf2 = cross_validate(estimator= rf2, \n X = x_tr_scaled,\n y = y_tr,\n cv = 5,\n return_train_score= True, \n return_estimator= True, \n verbose = 2)",
"[Parallel(n_jobs=1)]: Using backend SequentialBackend with 1 concurrent workers.\n"
]
],
[
[
"Below are the cross validation scores for our training and testing scores. These scores are better than our scores with all the data including outliers for random forest.",
"_____no_output_____"
]
],
[
[
"print(cv_fivefold_rf2['train_score'])\nprint(cv_fivefold_rf2['test_score'])",
"[0.94131861 0.94338971 0.94375431 0.94064872 0.94168392]\n[0.88827586 0.89931034 0.90331492 0.88259669 0.88535912]\n"
]
],
[
[
"Over here, we find the mean of the validation and the standard deviation to verify the overall model accuracy. This is our overall summary of random forest model. Our mean is around 89.2 and there is a standard devaition of about 0.0108. The overall score is better than the one without the outliers for random forest model our mean was 86% in that one. ",
"_____no_output_____"
]
],
[
[
"rf2_validation_mean = cv_fivefold_rf2['test_score'].mean()\nrf2_validation_std = cv_fivefold_rf2['test_score'].std()\nprint('Decision tree without outliers 5-fold cv results (Accuracy) %.3f =/- %.3f'%(rf2_validation_mean, rf2_validation_std))",
"Decision tree without outliers 5-fold cv results (Accuracy) 0.892 =/- 0.008\n"
]
],
[
[
"In our example below, we can see that the recall score was slightly lower for the ones with removal of outliers but the accuracy and the recall scores were higher when outliers were removed in the case of Random Forest.",
"_____no_output_____"
]
],
[
[
"print('Random Forest accruacy score without outliers:', accuracy_score(y_tr, rf2_pred))\nprint('Random Forest precision score without outliers:', precision_score(y_tr, rf2_pred))\nprint('Random Forest recall score without outliers:', recall_score(y_tr, rf2_pred))",
"Random Forest accruacy score without outliers: 0.9342904472667035\nRandom Forest precision score without outliers: 0.9876160990712074\nRandom Forest recall score without outliers: 0.5768535262206148\n"
],
[
"print('Random Forest accruacy score:', accuracy_score(y_train, predictions_rf))\nprint('Random Forest precision score:', precision_score(y_train, predictions_rf))\nprint('Random Forest recall score:', recall_score(y_train, predictions_rf))",
"Random Forest accruacy score: 0.8736409389527066\nRandom Forest precision score: 0.835179742256387\nRandom Forest recall score: 0.5906619763351455\n"
]
],
[
[
"## Present your solution",
"_____no_output_____"
],
[
"After looking at all of the details and considering it, I decided to go with the random forest model as it had a higher accuracy score than the other ones and the the preductions of true postiives are true negatives were in the top 2 so a majority of the data was predicted correctly. The accuracy score exceeeded 90 was about 93% with the outliers removed They also had the highest calculation scores when in terms of cross validation which was 0.892% +/- 0.008 when outliers were removed. I also have decided to go with the solutions after the outlier was removed as the overall scores were higher in those calses. ",
"_____no_output_____"
],
[
"### Model Results and Final Choice",
"_____no_output_____"
],
[
"We instantiate the model againa ndd fit it again, but this time we are testing and showing out final output after decision is made, so we can use our test datasets in this case. We can see that the true positives and the true negeatives are higher thant he other so it made the correct predictions at the most of the time. I also notice that the precision score was perfect taht calculates the true positive / (true positives + false positives).",
"_____no_output_____"
]
],
[
[
"ran_f = RandomForestClassifier(n_estimators = 300, max_features='auto', max_depth= 10)\nran_f.fit(x_ts_scaled, y_ts)\nran_f_sc = ran_f.score(x_ts_scaled, y_ts)\nran_f_pred = ran_f.predict(x_ts_scaled)",
"_____no_output_____"
],
[
"print('Random Forest accruacy score without outliers:', accuracy_score(y_ts, ran_f_pred))\nprint('Random Forest precision score without outliers:', precision_score(y_ts, ran_f_pred))\nprint('Random Forest recall score without outliers:', recall_score(y_ts, ran_f_pred))",
"Random Forest accruacy score without outliers: 0.9845474613686535\nRandom Forest precision score without outliers: 1.0\nRandom Forest recall score without outliers: 0.8947368421052632\n"
]
],
[
[
"In the confusion matrix below, we can see that there is a 98% accuracy rate with the test data set. ",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(9,9))\nsns.heatmap(cm_dt, annot=True, fmt=\".3f\", linewidths=.5, square = True, cmap = 'Pastel1')\nplt.ylabel('Actual label');\nplt.xlabel('Predicted label');\nall_sample_title = 'Accuracy Score for training data: {0}'.format(ran_f_sc)\nplt.title(all_sample_title, size = 15);\nplt.show()",
"_____no_output_____"
]
],
[
[
"Below, we developed an ROC curve it shows the comparison of our true psotove to the false positive rate and the larger the area under the specific curve will be the better it is. The area under curve is AUC score. As we can see here also, the Random Forest classifier is the highest in comparison to the other two. After taking a look at the model results, the ROC curve and the results of the model scores shows us that the Random Forest is the best modelling option and I will be recommending that.",
"_____no_output_____"
]
],
[
[
"log_dis = metrics.plot_roc_curve(lr, x_ts_scaled, y_ts) \ndt_dis = metrics.plot_roc_curve(dt, x_ts_scaled, y_ts, ax = log_dis.ax_)\nrf_dis = metrics.plot_roc_curve(rf2, x_ts_scaled, y_ts, ax = log_dis.ax_)\n\nrf_dis.figure_.suptitle('ROC curve comparison')\nplt.show()",
"_____no_output_____"
]
],
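[
[
"The AUC values behind these curves can also be computed numerically. A small sketch, assuming the three fitted models and the scaled test split from above:\n\n```python\nfrom sklearn.metrics import roc_auc_score\n\nfor name, model in [('logistic regression', lr), ('decision tree', dt), ('random forest', rf2)]:\n    probs = model.predict_proba(x_ts_scaled)[:, 1]  # probability of the positive class\n    print(name, roc_auc_score(y_ts, probs))\n```",
"_____no_output_____"
]
],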
[
[
"## Conclusions, Limitations and later work",
"_____no_output_____"
],
[
"As we have already seen, this dataset contains information about individuals in a census containing improtant information like their age, gender, job occupation, job profession, education completed, captial gain/loss, etc.\nAlong with that, we have seen that we there income was placed into classifications where it was either greater than 50K income or less than and equal to 50K income. As a data scientist in The US Census bureau, my job was to to verify the accuracy of whether each of the individuals salary range was accurate or if somehting seemed fishy and accordingly I used logistic regression model, decision tree model and random forest model. Then my role wwas to give the idea on which model was the best option for this task and recommend one. In order to do that, I needed to provide accuracy details to the company and give them an idea on the dataset and express my thoughts. I initially converted all of the categorical datas to all numeric variables and used those to test it in logistic regression models along with decision tree models. Though, we had aimed for accuracy scores above 75% as that was our baseline, I realized that the logistic regression model has a specific accuracy score higher than 80 and also repeated the steps with logistic regression and random forest model and realized that all of them had a score higher than 80%. However, one common thing was that all of the models hod the highest true negative value and the true positive value was less than false negative. A false negative is scary as things are good but prediction comes that it is nor good. However, that was not the case in the random forest model. In order to further fine tune and enhance my model, I decided to remove outliers from specific columns and see how the model was affected. I realized that it helped the model significanatly and the random forest moel came in the 93 percent range. In all of the cases the crosss validation accuracy scores were also higher for the results of the random forest model. I realized that due to the fact that the random forest model shows a higher data of accuracy in our training sets, I fel that it would be an ideal candidate to use for classification modelling next time. Along with that, I realized that in this dataset the negative overalls were higher than positives, but it was goodd taht the true negatives were realized if they fell in the false positive category that would be bad also as an individual woul feel assured as prrediction would say good, but in reality things maay be bad. That I felt was one **limitation** where the negative cases may ahve been high. Along with that, another **limitation** I felt was that more data could be used like the housing status and possoibly if it was scaled a bit further, I wonder how thee model may have been impacted. Some of my **next steps** are to see how housing status may play a role like if they are in rent, own a house, etc. I also plan as a **next step** to possibly remove more outliers, but it seem that whether you should emove outliers or not depends on the what your aim is. Another, **next step** is that I plan to help enahce the model so it would possibly be higher than 98. 98 was a good score, however when we are evaluating a score like 99 or as close to 100 would be reassuring and that is what I plan to look into fo the long run. ",
"_____no_output_____"
],
[
"## References and contributions",
"_____no_output_____"
],
[
"- Dataset: https://www.kaggle.com/uciml/adult-census-income\n- Dataset inspired by: http://archive.ics.uci.edu/ml/datasets/Adult\n- https://www.usnews.com/news/us/articles/2020-02-03/report-census-hasnt-tested-tasks-to-catch-people-who-lie\n- https://federalnewsnetwork.com/federal-newscast/2020/03/spreading-false-information-about-2020-census-could-land-you-in-jail-if-new-bill-becomes-law/",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a18ed3f31bf4a53ae6cb6a958dfd411c98d995c
| 294,736 |
ipynb
|
Jupyter Notebook
|
lectures/notebooks/02_probabilistic_programming.ipynb
|
wilsonjefferson/DSSC_SML
|
a49adbadba1876b9769144d120d66be7e642062b
|
[
"MIT"
] | null | null | null |
lectures/notebooks/02_probabilistic_programming.ipynb
|
wilsonjefferson/DSSC_SML
|
a49adbadba1876b9769144d120d66be7e642062b
|
[
"MIT"
] | null | null | null |
lectures/notebooks/02_probabilistic_programming.ipynb
|
wilsonjefferson/DSSC_SML
|
a49adbadba1876b9769144d120d66be7e642062b
|
[
"MIT"
] | null | null | null | 611.485477 | 119,528 | 0.946769 |
[
[
[
"# Probabilistic Programming\n\nA Probabilistic Programming Language (PPL) is a computer language providing statistical modelling and inference functionalities, in order to reason about random variables, probability distributions and conditioning problems. The most popular PPLs are `Stan`, `PyMC`, `Pyro` and `Edward`. ",
"_____no_output_____"
],
[
"A probabilistic program is a mix of **deterministic computation** and **sampling**, which allows to *draw random values* from distributions, to *condition* variables on observations and to perform *inference*.",
"_____no_output_____"
],
[
"### Pyro\nPyro is a universal probabilistic programming language based on Python.\n\nIt can represent any probabilistic model, while providing automatic optimization-based inference that is flexible and scalable to large data sets. \nPyro builds on PyTorch library, which supports GPU-accelerated tensor math and includes automatic differentiation, a technique for efficiently computing gradients.\n",
"_____no_output_____"
],
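[
"As a one-line illustration of the automatic differentiation Pyro inherits from PyTorch (a toy computation, not part of any model here):\n\n```python\nimport torch\n\nx = torch.tensor(2.0, requires_grad=True)\ny = x ** 2\ny.backward()   # populate x.grad with dy/dx\nprint(x.grad)  # dy/dx = 2x = 4\n```",
"_____no_output_____"
],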
[
"### Models\nThe basic unit of probabilistic programs is the stochastic function (or model). A statistical model is a mathematical description of how the values of some knowns and unknowns could generate the observed data.\n\n\n<div align=\"center\" style=\"color:darkblue\">S. Wood, \"Core Statistics\"</div>\n\n\nA stochastic function in Python is an arbitrary function that combines two ingredients:\n\n- deterministic Python code\n- primitive stochastic functions that call a random number generator",
"_____no_output_____"
],
[
"**Using `sample()` primitive** \n\nDrawing a sample from the unit normal distribution $\\mathcal{N}(0,1)$.",
"_____no_output_____"
]
],
[
[
"import torch\nimport pyro\npyro.set_rng_seed(1) # for reproducibility\n\nloc = 0. # mean \nscale = 1. # standard deviation\n\n# using pytorch\nnormal = torch.distributions.Normal(loc, scale) # create a normal distribution object\nx = normal.rsample() # draw a sample from N(0,1)\nprint(\"pytorch sample:\\t\", x)\n\n# using pyro\nx = pyro.sample(\"sample_name\", pyro.distributions.Normal(loc, scale))\n\nprint(\"pyro sample:\\t\", x)",
"pytorch sample:\t tensor(0.6614)\npyro sample:\t tensor(0.2669)\n"
]
],
[
[
"Pyro samples are named: Pytorch backend uses these names to uniquely identify sample statements and change their behavior at runtime depending on how the enclosing stochastic function is being used.",
"_____no_output_____"
],
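[
"Because sites are named, the backend can record them. A minimal sketch using `pyro.poutine.trace` to list the sample sites of a tiny model (the model itself is made up for illustration):\n\n```python\ndef toy_model():\n    return pyro.sample(\"z\", pyro.distributions.Normal(0., 1.))\n\ntrace = pyro.poutine.trace(toy_model).get_trace()\nprint(list(trace.nodes.keys()))  # the named site \"z\" appears alongside _INPUT/_RETURN\n```",
"_____no_output_____"
],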
[
"**Drawing multiple samples**\n\nNow we draw multiple samples from $\\mathcal{N}(2,2)$ and $\\text{Exp}(0.3)$ distributions and plot the corresponding histograms.",
"_____no_output_____"
]
],
[
[
"import seaborn as sns \nimport matplotlib.pyplot as plt\nimport pyro.distributions as dist\n\n# distributions\nnormal = dist.Normal(2, 2)\nexp = dist.Exponential(0.3)\n\n# multiple samples\nnormal_samples = [pyro.sample(\"n\",normal) for i in range(200)]\nexp_samples = [pyro.sample(\"n\",exp) for i in range(200)]\n\n#plot\nfig, axes = plt.subplots(1, 2, figsize=(12,4))\nsns.distplot(normal_samples, ax=axes[0])\nsns.distplot(exp_samples, ax=axes[1])\naxes[0].set_title('Normal')\naxes[1].set_title('Exponential')\nplt.show()",
"_____no_output_____"
]
],
[
[
"Seaborn `distplot()` automatically estimates the PDFs over histogram bins.",
"_____no_output_____"
],
[
"**Simple stochastic model**\n\nSuppose we want to reason about how temperature interacts with sunny and cloudy weather. We can define a simple stochastic function `weather()` describing the interaction\n\n\n$$ \\mathcal{N}(12.0,5.0^2) \\; \\text{for cloudy weather}$$\n \n$$\\mathcal{N}(23.0,6.0^2) \\; \\text{for sunny weather} \\; $$",
"_____no_output_____"
]
],
[
[
"def weather():\n # generate a binary sample\n is_cloudy = pyro.sample('cloudy', dist.Bernoulli(0.3))\n \n # convert binary sample into categorical\n is_cloudy = 'cloudy' if is_cloudy.item() == 1.0 else 'sunny'\n \n loc_temp = {'cloudy': 12.0, 'sunny': 23.0}[is_cloudy]\n scale_temp = {'cloudy': 5.0, 'sunny': 6.0}[is_cloudy]\n temp = pyro.sample('temp', dist.Normal(loc_temp, scale_temp))\n \n return {\"weather\":is_cloudy, \"temp\":temp.item()}\n\n[weather() for _ in range(5)]",
"_____no_output_____"
]
],
[
[
"We could use this stochastic function to model the sales of ice cream based on the weather.",
"_____no_output_____"
]
],
[
[
"def ice_cream_sales():\n is_cloudy, temp = weather()\n expected_sales = 200. if is_cloudy == 1 and temp > 35.0 else 20.\n sales = pyro.sample('ice_cream', pyro.distributions.Normal(expected_sales, 10.0))\n return sales\n\n[ice_cream_sales() for _ in range(5)]",
"_____no_output_____"
]
],
[
[
"## Inference\n\nThe purpose of statistical inference is that of using a statistical model to infer the values of the unknowns that are consistent with the observed data.\n\n\n|Frequentist interpretation|Bayesian interpretation|\n|:-:|:-:|\n|Probability measures a proportion of outcomes. | Probability measures the believability in an event. |\n|There is randomness in our estimation of the parameters, but not in the parameters themselves, which are considered as fixed.| Parameters are treated as random variables and our belief about these parameters is updated in the light of data.|\n\n",
"_____no_output_____"
],
[
"### Bayesian inference\n",
"_____no_output_____"
],
[
"**Bayes theorem**\n\nLet $A$ and $B$ be two events, such that $P(B)\\neq0$, then $\nP(A|B) = \\frac{P(A,B)}{P(B)}=\\frac{P(B|A)P(A)}{P(B)}$.\n\n\n<div> <img src=\"attachment:image.png\" width=\"400\"/></div>\n<div align=\"center\" style=\"color:darkblue\">https://medium.com/informatics-lab/probabilistic-programming-1535d7882dbe</div>\n\n**Bayes theorem example**\n\nThere are two boxes $b_1$ and $b_2$. Box 1 contains three red and five white balls and box 2 contains two red and five white balls. A box $B\\in\\{b1,b2\\}$ is chosen at random with $P(B=b_1)=P(B=b_2)=0.5$ and a ball chosen at random from this box turns out to be red. \nWhat is the posterior probability that the red ball came from box 1?\n\n$R\\in\\{0,1\\}$ indicates whether the chosen ball is red or not.\n\nFrom Bayes theorem we get \n$$\nP(B=b_1|R=1)=\\frac{P(B=b_1,R=1)}{P(R=1)}.\n$$\n\nand $P(B=b_1,R=1) = P(R=1|B=b_1)P(B=b_1)=\\frac{3}{8}\\cdot \\frac{1}{2}$. \n\nFrom the law of total probability $P(R=1)=\\sum_{i\\in\\{1,2\\}}P(R=1|B=b_i)P(B=b_i)=\\frac{3}{8}\\cdot \\frac{1}{2}+\\frac{2}{7}\\cdot\\frac{1}{2}=\\frac{37}{112}$.\n\nConsequently, $$P(B=b_1|R=1)=\\frac{3}{16}\\cdot\\frac{112}{37}=\\frac{21}{37}\\approx 0.56$$",
"_____no_output_____"
],
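[
"The box example is small enough to verify numerically, e.g. with exact fractions:\n\n```python\nfrom fractions import Fraction\n\np_b1 = Fraction(1, 2)\np_red_given_b1 = Fraction(3, 8)   # box 1: 3 red of 8 balls\np_red_given_b2 = Fraction(2, 7)   # box 2: 2 red of 7 balls\n\np_red = p_red_given_b1 * p_b1 + p_red_given_b2 * (1 - p_b1)\nprint(p_red_given_b1 * p_b1 / p_red)  # Fraction(21, 37), about 0.57\n```",
"_____no_output_____"
],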
[
"**Posterior probability**\n\nUnder the Bayesian paradigm we do not estimate parameters, we\ncompute their distribution based on the given data.\n\nThe posterior probability is derived according to Bayes' theorem\n\n$$\np(\\theta|x) = \\frac{p(x|\\theta)p(\\theta)}{p(x)}\n$$\n\nand the idea of uncertainty is preserved by the specific interpretation attributed to the involved terms:\n\n- **prior probability** $p(\\theta)$ = degree of belief of event occurring before observing any evidence\n- **evidence** $p(x)$ = observed data\n- **likelihood** $p(x|\\theta)$ = compatibility of the evidence with the given hypothesis\n- **posterior probability** $p(\\theta|x)$ = updated belief given the evidence\n\n<div> <img src=\"attachment:image.png\" width=\"500\"/></div>\n\n<div align=\"center\" style=\"color:darkblue\">https://www.researchgate.net/figure/Bayesian-updating-of-the-prior-distribution-to-posterior-distribution-The-Posterior_fig1_320507985</div>\n\n",
"_____no_output_____"
],
[
"### Conjugate priors\n\nIf the posterior distribution $p(\\theta|x)$ belongs to the same family as the prior distribution $p(\\theta)$, then the prior is said to be a **conjugate prior** for the likelihood function $p(x|\\theta)$.\nThis is a particularly convenient case in which the posterior distribution has a closed-form expression.\n\nThese are just a few examples of conjugate priors:\n\n|Conjugate prior distribution| Likelihood | Prior hyperparameters | Posterior hyperparameters|\n|:------:|:-----:|:-----:|:------:|\n|Normal|Normal (known var.)|$$\\mu_0,\\sigma_0^2$$|$${\\frac{1}{\\frac{1}{\\sigma_0^2}+\\frac{n}{\\sigma^2}}\\Bigg(\\frac{\\mu_0}{\\sigma_0^2}+\\frac{\\sum_{i=1}^n x_i}{\\sigma^2}\\Bigg),\\Bigg(\\frac{1}{\\sigma_0^2}+\\frac{n}{\\sigma^2}\\Bigg)^{-1}}$$|\n|Inverse Gamma|Normal (known mean)|$$\\alpha,\\beta$$|$$\\alpha+\\frac{n}{2},\\beta+\\frac{\\sum_{i=1}^n (x_i-\\mu)^2}{2}$$|\n|Beta|Binomial|$$\\alpha,\\beta$$|$$\\alpha+\\sum_{i=1}^n x_i, n-\\sum_{i=1}^n x_i+\\beta$$|\n|Gamma|Poisson|$$k,\\theta$$|$$k+\\sum_{i=1}^n x_i,\\frac{\\theta}{n\\theta+1}$$|\n|Gamma|Exponential|$$\\alpha,\\beta$$|$$\\alpha+n,\\beta+\\sum_{i=1}^n x_i$$|",
"_____no_output_____"
],
[
"**Beta-Binomial case**\n\n$Beta(\\alpha,\\beta)$ prior and $x\\sim Bin(n,\\pi)$ likelihood result in the posterior\n\n\\begin{align}\np(\\pi|x,\\alpha,\\beta)&\\propto \\pi^x (1-\\pi)^{n-x} \\pi^{\\alpha-1}(1-\\pi)^{\\beta-1}\\\\\n &\\propto \\pi^{x+\\alpha-1}(1-\\pi)^{n-x+\\beta-1}\n\\end{align}\n\nwhich is a $Beta(x+\\alpha, n-x+\\beta)$.",
"_____no_output_____"
],
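[
"A quick numerical check of the Beta-Binomial update, with made-up counts ($\\alpha=\\beta=2$, $n=10$ trials, $x=7$ successes):\n\n```python\nfrom scipy import stats\n\nalpha, beta, n, x = 2, 2, 10, 7\nposterior = stats.beta(x + alpha, n - x + beta)\nprint(posterior.mean())  # (x + alpha) / (n + alpha + beta) = 9/14\n```",
"_____no_output_____"
],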
[
"### Approximate inference\n\nPrior $p(\\theta)$ and likelihood $p(x|\\theta)$ functions are usually known as part of the model, while the computation of the normalization factor \n\n$$\np(x) = \\int_\\theta p(x|\\theta) p (\\theta) d \\theta\n$$\n\ncan easily become intractable in the high-dimensional cases.",
"_____no_output_____"
],
[
"**Example of intractable posterior**\n\nSuppose we are trying to figure out how much something weighs, but the scale we’re using is unreliable and gives slightly different answers every time we weigh the same object. We could try to compensate for this variability by integrating the noisy measurement information with a guess based on some *prior knowledge* about the object.\n\n$$weight \\, | \\, guess \\sim \\mathcal{N}(guess, 1)$$\n\n$$ measurement \\, | \\, guess, weight \\sim \\mathcal{N}(weight, 0.75^2) $$",
"_____no_output_____"
]
],
[
[
"def scale(guess):\n weight = pyro.sample(\"weight\", dist.Normal(guess, 1.0))\n measurement = pyro.sample(\"measurement\", dist.Normal(weight, 0.75))\n return measurement",
"_____no_output_____"
]
],
[
[
"The model is quite simple, so we are able to determine our posterior distribution of interest analytically. But in general the exact computation of the posterior of an arbitrary stochastic function is intractable. \n\nEven the `scale` model with a non-linear function may become intractable.",
"_____no_output_____"
]
],
[
[
"def intractable_scale(guess):\n weight = pyro.sample(\"weight\", dist.Normal(guess, 1.0))\n measurement = pyro.sample(\"measurement\", \n dist.Normal(some_nonlinear_function(weight), 0.75))\n return measurement",
"_____no_output_____"
]
],
[
[
"Approximate inference addresses the need of applying Bayesian learning to more complex problems and to the high-dimensional datasets that we are dealing with in machine learning.\n\nExamples of approximate inference include Variational Bayesian methods, Markov chain Monte Carlo, Markov Random Fields and Bayesian Networks.\nWe can identify two main categories for approximate inference:\n- **Stochastic methods** turn the problem of inference into a problem of sampling from the posterior distribution of interest;\n- **Deterministic methods** substitute inference with optimization problems.",
"_____no_output_____"
],
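[
"As a concrete taste of the deterministic, optimization-based route, here is a minimal stochastic variational inference sketch for the `scale` model above, following the standard Pyro pattern; the observed value 9.5 and the learning rate are made up for illustration:\n\n```python\nfrom torch.distributions import constraints\nfrom pyro.infer import SVI, Trace_ELBO\nfrom pyro.optim import Adam\n\n# condition the model on a (made-up) noisy measurement\nconditioned_scale = pyro.condition(scale, data={\"measurement\": torch.tensor(9.5)})\n\ndef scale_guide(guess):\n    # variational parameters for the approximate posterior over `weight`\n    loc = pyro.param(\"loc\", torch.tensor(guess))\n    sd = pyro.param(\"sd\", torch.tensor(1.), constraint=constraints.positive)\n    return pyro.sample(\"weight\", dist.Normal(loc, sd))\n\nsvi = SVI(conditioned_scale, scale_guide, Adam({\"lr\": 0.01}), loss=Trace_ELBO())\nfor _ in range(1000):\n    svi.step(8.5)  # guess = 8.5\n\nprint(pyro.param(\"loc\").item(), pyro.param(\"sd\").item())\n```",
"_____no_output_____"
],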
[
"## References\n- S. Wood, \"Core Statistics\"\n- [Pyro library](https://pyro.ai/)\n- [Pyro documentation](https://docs.pyro.ai/en/1.1.0/index.html)\n- [Probabilistic Programming & Bayesian Methods for Hackers](https://camdavidsonpilon.github.io/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4a18f7f5bce40523991674db61bc48d4162b54aa
| 8,071 |
ipynb
|
Jupyter Notebook
|
Twitter_Sentiment_Analysis.ipynb
|
jeremyyma/ML2
|
c4a504eb7a2baa7a5c4139610f33527b63cf922e
|
[
"Apache-2.0"
] | null | null | null |
Twitter_Sentiment_Analysis.ipynb
|
jeremyyma/ML2
|
c4a504eb7a2baa7a5c4139610f33527b63cf922e
|
[
"Apache-2.0"
] | null | null | null |
Twitter_Sentiment_Analysis.ipynb
|
jeremyyma/ML2
|
c4a504eb7a2baa7a5c4139610f33527b63cf922e
|
[
"Apache-2.0"
] | null | null | null | 35.244541 | 264 | 0.516541 |
[
[
[
"## Twitter Sentiment Analysis\nDetermining whether a piece of writing is positive, negative or neutral. It’s also known as opinion mining, deriving the opinion or attitude of a speaker.\n\nconda install -n py36 -c conda-forge tweepy\nconda install -n py36 -c conda-forge textblob\n",
"_____no_output_____"
]
],
[
[
"import re \nimport tweepy \nfrom tweepy import OAuthHandler \nfrom textblob import TextBlob ",
"_____no_output_____"
],
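[
"# Small illustrative check (added, not part of the original script): TextBlob's\n# sentiment.polarity lies in [-1, 1]; the classifier below thresholds it at 0.\nfor text in [\"I love this!\", \"This is terrible.\", \"The sky is blue.\"]:\n    print(text, TextBlob(text).sentiment.polarity)",
"_____no_output_____"
],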
[
"class TwitterClient(object): \n ''' \n Generic Twitter Class for sentiment analysis. \n '''\n def __init__(self): \n ''' \n Class constructor or initialization method. \n '''\n # keys and tokens from the Twitter Dev Console \n consumer_key = 'Y6QWAWLoyjHCCA20qPmBp2wkI'\n consumer_secret = 'SZPH7zWLJuxDskwoRYqfUb2Lz1yeftyYGh7DBimGz1niLA6o5N'\n access_token = '3896792723-IcHSdssHFC1cGpghgO2On6bc4j0y31Wzw2Yb2Gg'\n access_token_secret = 'ejbGb8zEsvpIhcPtH264pkvsfkMYjspDvaT1YSexdxrAv'\n \n # attempt authentication \n try: \n # create OAuthHandler object \n self.auth = OAuthHandler(consumer_key, consumer_secret) \n # set access token and secret \n self.auth.set_access_token(access_token, access_token_secret) \n # create tweepy API object to fetch tweets \n self.api = tweepy.API(self.auth) \n except: \n print(\"Error: Authentication Failed\") \n \n def clean_tweet(self, tweet): \n ''' \n Utility function to clean tweet text by removing links, special characters \n using simple regex statements. \n '''\n return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t]) \n |(\\w+:\\/\\/\\S+)\", \" \", tweet).split()) \n \n def get_tweet_sentiment(self, tweet): \n ''' \n Utility function to classify sentiment of passed tweet \n using textblob's sentiment method \n '''\n # create TextBlob object of passed tweet text \n analysis = TextBlob(self.clean_tweet(tweet)) \n # set sentiment \n if analysis.sentiment.polarity > 0: \n return 'positive'\n elif analysis.sentiment.polarity == 0: \n return 'neutral'\n else: \n return 'negative'\n \n def get_tweets(self, query, count = 10): \n ''' \n Main function to fetch tweets and parse them. \n '''\n # empty list to store parsed tweets \n tweets = [] \n \n try: \n # call twitter api to fetch tweets \n fetched_tweets = self.api.search(q = query, count = count) \n \n # parsing tweets one by one \n for tweet in fetched_tweets: \n # empty dictionary to store required params of a tweet \n parsed_tweet = {} \n \n # saving text of tweet \n parsed_tweet['text'] = tweet.text \n # saving sentiment of tweet \n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text) \n \n # appending parsed tweet to tweets list \n if tweet.retweet_count > 0: \n # if tweet has retweets, ensure that it is appended only once \n if parsed_tweet not in tweets: \n tweets.append(parsed_tweet) \n else: \n tweets.append(parsed_tweet) \n \n # return parsed tweets \n return tweets \n \n except tweepy.TweepError as e: \n # print error (if any) \n print(\"Error : \" + str(e)) \n \ndef main(): \n # creating object of TwitterClient Class \n api = TwitterClient() \n # calling function to get tweets \n tweets = api.get_tweets(query = 'Donald Trump', count = 200) \n \n # picking positive tweets from tweets \n ptweets = [tweet for tweet in tweets if tweet['sentiment'] == 'positive'] \n # percentage of positive tweets \n print(\"Positive tweets percentage: {} %\".format(100*len(ptweets)/len(tweets))) \n # picking negative tweets from tweets \n ntweets = [tweet for tweet in tweets if tweet['sentiment'] == 'negative'] \n # percentage of negative tweets \n print(\"Negative tweets percentage: {} %\".format(100*len(ntweets)/len(tweets))) \n # percentage of neutral tweets \n print(\"Neutral tweets percentage: {} % \\ \n \".format(100*len(tweets - ntweets - ptweets)/len(tweets))) \n \n # printing first 5 positive tweets \n print(\"\\n\\nPositive tweets:\") \n for tweet in ptweets[:10]: \n print(tweet['text']) \n \n # printing first 5 negative tweets \n print(\"\\n\\nNegative tweets:\") \n for tweet in 
ntweets[:10]: \n print(tweet['text']) \n \nif __name__ == \"__main__\": \n # calling main function \n main() ",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
]
] |
4a18f8b25b1197037489091db0b93f16552de372
| 54,750 |
ipynb
|
Jupyter Notebook
|
Codes/Structuring the Model/VGG19 Training(AHE Segmented)[-7].ipynb
|
pushkaraggrawal/COVID-Net
|
e0a16f3c0ef9e7f23fcd763206be6bd5bab1895b
|
[
"MIT"
] | null | null | null |
Codes/Structuring the Model/VGG19 Training(AHE Segmented)[-7].ipynb
|
pushkaraggrawal/COVID-Net
|
e0a16f3c0ef9e7f23fcd763206be6bd5bab1895b
|
[
"MIT"
] | null | null | null |
Codes/Structuring the Model/VGG19 Training(AHE Segmented)[-7].ipynb
|
pushkaraggrawal/COVID-Net
|
e0a16f3c0ef9e7f23fcd763206be6bd5bab1895b
|
[
"MIT"
] | 1 |
2021-01-30T10:44:40.000Z
|
2021-01-30T10:44:40.000Z
| 54,750 | 54,750 | 0.85726 |
[
[
[
"# **Model Training**",
"_____no_output_____"
],
[
"Importing Basic Libraries and setting input stream for training and testing data",
"_____no_output_____"
]
],
[
[
"import keras",
"Using TensorFlow backend.\n"
],
[
"from keras.layers import Input, Dense, Lambda, Flatten\nfrom keras.layers import Dropout\nfrom keras.models import Model\n#From Keras.applications.vgg16 import VGG16\nfrom keras.applications.vgg19 import VGG19\nfrom keras.applications.vgg16 import preprocess_input\nfrom keras.preprocessing import image\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nimport numpy as np\nfrom glob import glob\nimport matplotlib.pyplot as plt\n\n#Re-size all the images\nIMAGE_SIZE = [224,224]\n\ntrain_path = '/content/drive/My Drive/Final Segmented Dataset/Train'\ntest_path = '/content/drive/My Drive/Final Segmented Dataset/Test'",
"_____no_output_____"
],
[
"import tensorflow as tf\ndevice_name = tf.test.gpu_device_name()\nif device_name != '/device:GPU:0':\n raise SystemError('GPU device not found')\nprint('Found GPU at: {}'.format(device_name))",
"Found GPU at: /device:GPU:0\n"
],
[
"from google.colab import drive\ndrive.mount('/content/drive')",
"Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly\n\nEnter your authorization code:\n··········\nMounted at /content/drive\n"
],
[
"train_datagen = ImageDataGenerator(rescale = 1./255,\n shear_range = 0.2,\n zoom_range = 0.2,\n horizontal_flip = True)\n\ntest_datagen = ImageDataGenerator(rescale = 1./255)\n\ntraining_set = train_datagen.flow_from_directory(train_path,\n target_size = (224, 224),\n batch_size = 32,\n class_mode = 'binary')\n\ntest_set = test_datagen.flow_from_directory(test_path,\n target_size = (224, 224),\n batch_size = 32,\n class_mode = 'binary')",
"Found 2010 images belonging to 2 classes.\nFound 600 images belonging to 2 classes.\n"
],
[
"from tensorflow.keras import models\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import optimizers\nimport os\nimport glob\nimport shutil\nimport sys\nimport numpy as np\nfrom skimage.io import imread\nimport matplotlib.pyplot as plt\nfrom IPython.display import Image\nimport vis\nget_ipython().run_line_magic('matplotlib', 'inline')\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"Adding Preprocessing layer to the front of VGG",
"_____no_output_____"
]
],
[
[
"IMAGE_SIZE = [224, 224]\n\nvgg = VGG19(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)",
"Downloading data from https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5\n80142336/80134624 [==============================] - 7s 0us/step\n"
],
[
"for layer in vgg.layers[:-7]:\n layer.trainable = False\n \n# our layers - you can add more if you want\nx = Flatten()(vgg.output)\nx = Dense(100, activation = 'relu')(x)\nx = Dropout(0.35)(x)\nx = Dense(100, activation = 'relu')(x)\nx = Dropout(0.35)(x)\nx = Dense(100, activation = 'relu')(x)\nx = Dropout(0.35)(x)\nprediction = Dense(1, activation='sigmoid')(x)\n#prediction = Dense(2, activation='softmax')(x)",
"_____no_output_____"
],
[
"def scheduler(epoch, lr):\n if epoch < 20:\n return lr\n else:\n return float(lr * tf.math.exp(-0.1))\nfrom keras import callbacks\n\ncallback = [callbacks.EarlyStopping(monitor='loss', patience=5), callbacks.LearningRateScheduler(scheduler, verbose=0)]",
"_____no_output_____"
],
[
"# create a model object\nmodel = Model(inputs=vgg.input, outputs=prediction)\n\n# view the structure of the model\n#model.summary()",
"_____no_output_____"
],
[
"model.compile(\n loss='binary_crossentropy', \n optimizer='adam',\n metrics=['accuracy']\n)\n#loss='categorical_crossentropy',",
"_____no_output_____"
],
[
"history = model.fit_generator(training_set,\n steps_per_epoch = len(training_set),\n epochs = 50, callbacks = callback,\n validation_data = test_set,\n validation_steps = len(test_set))\nlen(history.history['loss']) # Only 4 epochs are run.",
"Epoch 1/50\n 9/63 [===>..........................] - ETA: 14:05 - loss: 0.8919 - accuracy: 0.4539"
],
[
"# loss\nplt.plot(history.history['loss'], label='train loss')\nplt.plot(history.history['val_loss'], label='val loss')\nplt.legend()\nplt.show()\nplt.savefig('LossVal_loss')",
"_____no_output_____"
],
[
"# accuracies\nplt.plot(history.history['accuracy'], label='train acc')\nplt.plot(history.history['val_accuracy'], label='val acc')\nplt.legend()\nplt.show()\nplt.savefig('AccVal_acc')",
"_____no_output_____"
],
[
"import tensorflow as tf\nfrom keras.models import load_model\nmodel.save('/content/drive/My Drive/VGG19AS7.h5')",
"_____no_output_____"
],
[
"from keras.preprocessing import image\nfrom keras.applications.vgg19 import preprocess_input, decode_predictions\nimport numpy as np\n\nimg_path = '/content/drive/My Drive/Final Segmented Dataset/Validation/COVID Positive/X(977).jpg'\nimg = image.load_img(img_path, target_size=(224, 224)) #change to 224 , 224 , 3 if doesn't work\nimg_data = image.img_to_array(img)\nimg_data = np.expand_dims(img_data, axis=0)\nimg_data = preprocess_input(img_data)\n\npreds = model.predict(img_data)\n\n# decode the results into a list of tuples (class, description, probability)\nprint('Predicted:', preds ) #decode_predictions(preds)",
"Predicted: [[0.49981105]]\n"
],
[
"from keras.preprocessing import image\nfrom keras.applications.vgg19 import preprocess_input, decode_predictions\nimport numpy as np\n\nimg_path = '/content/drive/My Drive/Final Segmented Dataset/Validation/COVID Negative/X(377).png'\nimg = image.load_img(img_path, target_size=(224, 224)) #change to 224 , 224 , 3 if doesn't work\nimg_data = image.img_to_array(img)\nimg_data = np.expand_dims(img_data, axis=0)\nimg_data = preprocess_input(img_data)\n\npreds = model.predict(img_data)\n\n# decode the results into a list of tuples (class, description, probability)\nprint('Predicted:', preds ) #decode_predictions(preds)",
"Predicted: [[0.49981105]]\n"
],
[
"from keras.preprocessing import image\nfrom keras.applications.vgg19 import preprocess_input, decode_predictions\nimport numpy as np\n\nimg_path = '/content/drive/My Drive/Sample blacked out/CN.png'\nimg = image.load_img(img_path, target_size=(224, 224)) #change to 224 , 224 , 3 if doesn't work\nimg_data = image.img_to_array(img)\nimg_data = np.expand_dims(img_data, axis=0)\nimg_data = preprocess_input(img_data)\n\npreds = model.predict(img_data)\n\n# decode the results into a list of tuples (class, description, probability)\nprint('Predicted:', preds ) #decode_predictions(preds)",
"Predicted: [[0.49981105]]\n"
],
[
"from keras.preprocessing import image\nfrom keras.applications.vgg19 import preprocess_input, decode_predictions\nimport numpy as np\n\nimg_path = '/content/drive/My Drive/Sample blacked out/CP1.jpg'\nimg = image.load_img(img_path, target_size=(224, 224)) #change to 224 , 224 , 3 if doesn't work\nimg_data = image.img_to_array(img)\nimg_data = np.expand_dims(img_data, axis=0)\nimg_data = preprocess_input(img_data)\n\npreds = model.predict(img_data)\n\n# decode the results into a list of tuples (class, description, probability)\nprint('Predicted:', preds ) #decode_predictions(preds)",
"Predicted: [[0.49981105]]\n"
],
[
"END",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a18feb31bb276db4990043057b8558a1569dab7
| 565,860 |
ipynb
|
Jupyter Notebook
|
lectures/Week3.ipynb
|
simonmoesorensen/socialdata2022
|
c4adebb6dc8b635041e999195b97c1c850d3049a
|
[
"MIT"
] | null | null | null |
lectures/Week3.ipynb
|
simonmoesorensen/socialdata2022
|
c4adebb6dc8b635041e999195b97c1c850d3049a
|
[
"MIT"
] | null | null | null |
lectures/Week3.ipynb
|
simonmoesorensen/socialdata2022
|
c4adebb6dc8b635041e999195b97c1c850d3049a
|
[
"MIT"
] | null | null | null | 521.049724 | 108,635 | 0.938534 |
[
[
[
"# Week 3\n\nI hope you're getting the hang of things. Today we're going on with the prinicples of data visualization!",
"_____no_output_____"
],
[
"## Overview\n\nOnce again, the lecture has three parts:\n\n* First you will watch a video on visualization and solve a couple of exercises.\n* After that, we'll be reading about *scientific data visualization*, and the huge number of things you can do with just one variable. Naturally, we'll be answering questions about that book. \n* And finally reproducing some of the plots from that book.",
"_____no_output_____"
],
[
"## Part 1: Fundamentals of data visualization",
"_____no_output_____"
],
[
"Last week we had a small introduction of data visualization. Today, we are going to be a bit more specific on data analysis and visualization. Digging a bit more into the theory with the next video.\n\n<mark>*It's important to highlight that these lectures are quite important. We don't have a formal book on data visualization. So the only source of knowledge about the **principles**, **theories**, and **ideas**, that are the foundation for good data viz, comes from the videos*. So watch them 🤓 </mark>\n\n[](https://www.youtube.com/watch?v=yiU56codNlI)",
"_____no_output_____"
],
[
"> *Excercise 1.1:* Questions for the lecture\n> * As mentioned earlier, visualization is not the only way to test for correlation. We can (for example) calculate the Pearson correlation. Explain in your own words how the Pearson correlation works and write down it's mathematical formulation. Can you think of an example where it fails (and visualization works)?\n> **Answer:** The pearson correlation is defined as $\\rho(x,y) = \\frac{cov(x,y)}{\\sigma_x\\sigma_y}$ that is the covariance between x and y divided by their standard deviations multiplied. If $\\rho > 0$ there is a positive correlation and $\\rho < 0$ there is a negative correlation. For $\\rho = 0$ there is no correlation. The pearson correlation can only capture linear correlation of variables, thus if we had $y = x**2$ then $\\rho(x,y) = 0$ but the visualization would show a quadratic polynomial.\n> * What is the difference between a bar-chart and a histogram?\n> **Answer:** A bar-chart shows the count over some criteria or group, usually requires two variables. The histogram shows the frequency or density of one variable, thus showing its distribution.\n> * I mention in the video that it's important to choose the right bin-size in histograms. But how do you do that? Do a Google search to find a criterion you like and explain it.\n> **Answer:** A common approach is that $numberofbins = ceil( \\frac{(maximumvalue - minimumvalue)}{binwidth})$ that accounts for the range of the data (width in the plot) divided by the bin width which gives you how many bins there are room for in the range.",
"_____no_output_____"
],
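[
"A minimal numerical sketch of the failure case mentioned above (synthetic data, for illustration only): with $x$ symmetric around zero and $y = x^2$, the Pearson correlation is essentially zero even though $y$ is fully determined by $x$.\n\n```python\nimport numpy as np\n\nx = np.linspace(-1, 1, 101)\ny = x ** 2\nprint(np.corrcoef(x, y)[0, 1])  # approximately 0, despite perfect dependence\n```",
"_____no_output_____"
],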
[
"Ok, now that we've talked a bit about correlation and distributions, we are going to compute/visualize them while also testing some hypotheses along the way. Until now, we have analysed data at an explorative level, but we can use statistics to verify whether relationships between variables are significant. We'll do this in the following exercise.\n\n### *Exercise 1.2:*\n> Hypothesis testing. We will look into correlations between number of steps and BMI, and differences between two data samples (Females vs Males). Follow the steps below for success:\n> * First, we need to get some data. Download and read the data from the Female group [here](https://raw.githubusercontent.com/suneman/socialdata2022/main/files/data9b_f.csv) and the one from the Male group [here](https://raw.githubusercontent.com/suneman/socialdata2022/main/files/data9b_m.csv).\n>",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n\nfemale = pd.read_csv(\"https://raw.githubusercontent.com/suneman/socialdata2022/main/files/data9b_f.csv\")\nmale = pd.read_csv(\"https://raw.githubusercontent.com/suneman/socialdata2022/main/files/data9b_m.csv\")",
"_____no_output_____"
]
],
[
[
"> * Next, we are going to verify the following hypotheses:\n> 1. <mark>*H1: there is a statistically significant difference in the average number of steps taken by men and women*</mark>. Is there a statistically significant difference between the two groups? What is the difference between their mean number of steps? Plot two histograms to visualize the step-count distributions, and use the criterion you chose in Ex.1.1 to define the right bin-size.\n **Hint** you can use the function `ttest_ind()` from the `stats` package to test the hypothesis and consider a significance level $\\alpha=0.05$.\n> 2. <mark>*H2: there is a negative correlation between the number of steps and the BMI for women*.</mark> We will use Pearson's correlation here. Is there a negative correlation? How big is it?\n> 3. <mark>*H3: there is a positive correlation between the number of steps and the BMI for men*.</mark> Is there a positive correlation? Compare it with the one you found for women.\n>",
"_____no_output_____"
]
],
[
[
"import seaborn as sns\nimport matplotlib.pyplot as plt\n\nsns.set_theme()\n\n\ndef get_bins(data, binwidth=1000):\n return range(min(data), max(data) + binwidth, binwidth)",
"_____no_output_____"
]
],
[
[
"#### Hypothesis 1\n> There is a statistically significant difference in the average number of steps taken by men and women .",
"_____no_output_____"
]
],
[
[
"\nf, (ax1, ax2) = plt.subplots(1, 2)\nfemale['steps'].hist(bins=get_bins(female['steps'], 1000), ax=ax1)\nax1.set_title('Step-count histogram for females')\n\nmale['steps'].hist(bins=get_bins(male['steps'], 1000), ax=ax2)\nax2.set_title('Step-count histogram for males')\n\nplt.show()",
"_____no_output_____"
],
[
"from scipy.stats import ttest_ind\n\nres = ttest_ind(female['steps'], male['steps'])\n\nprint(f'Is there a significant difference between male and female step counts? {res.pvalue < 0.05}')",
"Is there a significant difference between male and female step counts? True\n"
]
],
[
[
"#### Hypothesis 2\nThere is a negative correlation between the number of steps and the BMI for women",
"_____no_output_____"
]
],
[
[
"print(f\"The correlation between female step counts and BMI is: {female.corr().iloc[0, 1]:.3}\")",
"The correlation between female step counts and BMI is: -0.356\n"
]
],
[
[
"#### Hypothesis 3\nThere is a positive correlation between the number of steps and the BMI for men",
"_____no_output_____"
]
],
[
[
"print(f\"The correlation between male step counts and BMI is: {male.corr().iloc[0, 1]:.3}\")",
"The correlation between male step counts and BMI is: -0.16\n"
]
],
[
[
"We see that both males and females have a negative correlation between their step count and BMI. However, women seem to have a larger negative correlation between walking and losing weight. Perhaps, they spend more energy when they walk, or they just include more exercise when they walk... this can be investigated using different datasets.",
"_____no_output_____"
],
[
"> * We have now gathered the results. Can you find a possible explanation for what you observed? You don't need to come up with a grand theory about mobility and gender, just try to find something (e.g. theory, news, papers, further analysis etc.) to support your conclusions and write down a couple of sentences.",
"_____no_output_____"
],
[
"As talked about previously, the activities around when women walk could affect the relationship more, and the fact that they walk more as an exercise than men. It could also be the fact that men just have a larger calorie intake than women in general and the diminishes the effect of walking. As a conclusion, it seems that women tend to burn more fat than men.",
"_____no_output_____"
],
[
"\n> *Exercise 1.3:* scatter plots. We're now going to fully visualize the data from the previous exercise.\n>\n> * Create a scatter plot with both data samples. Use `color='#f6756d'` for one <font color=#f6756d>sample</font> and `color='#10bdc3'` for the other <font color=#10bdc3>sample</font>. The data is in front of you, what do you observe? Take a minute to think about these exercises: what do you think the point is?\n * After answering the questions above, have a look at this [paper](https://genomebiology.biomedcentral.com/track/pdf/10.1186/s13059-020-02133-w.pdf) (in particular, read the *Not all who wander are lost* section).",
"_____no_output_____"
]
],
[
[
"f, ax1 = plt.subplots(1, 1, figsize=(5, 5))\nfemale.plot(kind='scatter', x='steps', y='bmi', color='#f6756d', ax=ax1, label='female')\nax1.set_title('Bmi vs steps for females and males')\nmale.plot(kind='scatter', x='steps', y='bmi', color='#10bdc3', ax=ax1, label='male')\nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"Obviously, the point is to show that one can perform a plethora of statistical analysis but without visualization we will rarely be able to understand the nature of the data. Which in this case is that it's clearly user-made and does not actually represent the step count vs bmi relationship for men and women.",
"_____no_output_____"
],
[
"\n> * The scatter plot made me think of another point we often overlook: *color-vision impairments*. When visualizing and explaining data, we need to think about our audience:\n> * We used the same colors as in the paper, try to save the figure and use any color-blindness simulator you find on the web ([this](https://www.color-blindness.com/coblis-color-blindness-simulator/) was the first that came out in my browser). Are the colors used problematic? Explain why, and try different types of colors. If you are interested in knowing more you can read this [paper](https://www.tandfonline.com/doi/pdf/10.1179/000870403235002042?casa_token=MAYp78HctgQAAAAA:AZKSHJWuNmoMXD5Dtqln1Sc-xjNwCe6UVDMVEpP95UjTH3O1H-NKRkfYljw2VLSm_zKlN74Da6g).\n> * But, are colors the only option we have? Find an alternative to colors, explain it, and change your scatter plot accordingly.",
"_____no_output_____"
],
[
"* The colors change especially for 'red-blind/protanopia' color blind people. Then it can be difficult to distinguish between the two different points.\n* An alternative to colors are different symbols such as crosses, squares, triangles, striped lines and so on. And it is often preferred in addition to colors!",
"_____no_output_____"
]
],
[
[
"f, ax1 = plt.subplots(1, 1, figsize=(8, 8))\nfemale.plot(kind='scatter', x='steps', y='bmi', color='#f6756d', marker='o', ax=ax1, label='female')\nax1.set_title('Bmi vs steps for females and males')\n\nmale.plot(kind='scatter', x='steps', y='bmi', color='#10bdc3', ax=ax1, marker='x', label='male')\nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Part 2: Reading about the theory of visualization\n\nSince we can go deeper with the visualization this year, we are going to read the first couple of chapters from [*Data Analysis with Open Source Tools*](http://shop.oreilly.com/product/9780596802363.do) (DAOST). It's pretty old, but I think it's a fantastic resource and one that is pretty much as relevant now as it was back then. The author is a physicist (like Sune) so he likes the way he thinks. And the books takes the reader all the way from visualization, through modeling to computational mining. Anywho - it's a great book and well worth reading in its entirety. \n\nAs part of this class we'll be reading the first chapters. Today, we'll read chaper 2 (the first 28 pages) which supports and deepens many of the points we made during the video above. \n\nTo find the text, you will need to go to **DTU Learn**. It's under \"Course content\" $\\rightarrow$ \"Content\" $\\rightarrow$ \"Lecture 3 reading\".",
"_____no_output_____"
],
[
"> *Excercise 2*: Questions for DAOST\n\n * Explain in your own words the point of the jitter plot\n\n> **Answer:** The point of a jitter plot is to avoid the case of a dot plot where points lie directly on top of each other. The jitter plot solves this by introducing a small random noise\n\n * Explain in your own words the point of figure 2-3. (I'm going to skip saying in your own words going forward, but I hope you get the point; I expect all answers to be in your own words)\n\n> **Answer:** It aims to show that histograms are not always perfect out of the box. They require some thinking with respect to anchoring of the bins, the bin width and number of bins in order to accurately represent the distribution\n\n * The author of DAOST (Philipp Janert) likes KDEs (and think they're better than histograms). And we don't. Sune didn't give a detailed explanation in the video, but now that works to our advantage. We'll ask you to think about this and thereby create an excellent exercise: When can KDEs be misleading\n\n> **Answer:** KDEs can produce smooth representations of dataset whilst also accounting for outliers. However, they are highly influenced by the choice of hyperparameters such as the bandwidth. The data scientist may choose to make a dataset seem larger and more significant than it actually is if the bandwidth is chosen 'correctly'. Moreover, KDEs can cause performance issues for larger datasets. The choice of bandwidth is often a bias-variance trade-off as described in the book which leads to complicated techniques such as cross-validation for producing just a simple plot of a distribution\n\n* Sune discussed some strengths of the CDF - there are also weaknesses. Janert writes CDFs have less intuitive appeal than histograms of KDEs. What does he mean by that\n\n> **Answer:** The value on a CDF plot are always influenced by the previous values which means that the reader has to keep in mind all the previous value up until a certain point and subtract that from the next point to get the change in density from one point to another. This is quite cumbersome in comparison to just looking at a bin for each range that in itself describes how many values there are in the given range. Moreover, instantaneous changes in CDFs are hard to notice for both small and large changes as it is the gradient of the slope that describes the change, and not the value of the line\n\n* What is a *Quantile plot*? What is it good for\n\n> **Answer:** They make the CDF more interpretable as the reader can read from the y axis to the axis. Asking question such as: What response tim\ncorresponds to the 10th percentile of response times\n\n* How is a *Probablity plot* defined? What is it useful for? Have you ever seen one before\n\n> **Answer:** The probability plot is defined as the inverse of the gaussian distribution. An by a bit of algebra, you can get a linear relationship of a data set as a function of $\\phi^{-1} (y_i)$ with intercept $\\mu$ and slope $\\sigma$. This means that any normal distributed variable should fall on a straight line. However, if it does not, it means that the data is not distributed according to a normal distribution\n\n* One of the reasons we like DAOST is that Janert is so suspicious of mean, median, and related summary statistics. Explain why one has to be careful when usin\nthose - and why visualization of the full data is always better\n\n> **Answer:** First off, they apply only under certain assumptions and are misleading if those assumptions are not fulfilled. 
Those assumptions are for example that the data is *unimodal*. By visualizing the data it can quickly be inspected if the distribution is uni or bi or more modal\n\n* Sune loves box plots (but not enough to own one of [these](https://twitter.com/statisticiann/status/1387454947143426049) 😂). When are box plots most useful\n\n> **Answer:** Box plots are great to represent outliers and a dataset's percentile and median values. It also allows us to see if the data set is symmetric and how the data distributes around the mean. They are often best when comparing multiple distributions against each other\n\n* The book doesn't mention [violin plots](https://en.wikipedia.org/wiki/Violin_plot). Are those better or worse than box plots? Why\n\n> **Answer:** A violin plot is a hybrid of the KDE and box plot. This means that violin plots can additionally show density of the distribution and not only its summary statistics! Since it provides more information, it can definitely be better than a box plot. However, one should care that the KDE does not become over representative and show things that may be misleading.",
"_____no_output_____"
],
[
"## Part 3: *Finally*! Let's create some visualizations",
"_____no_output_____"
],
[
"### *Exercise 3.1*: Connecting the dots and recreating plots from DAOST but using our own favorite dataset.\n>\n> * Let's make a jitter-plot (that is, code up something like **Figure 2-1** from DAOST from scratch), but based on *SF Police data*. My hunch from inspecting the file is that the police-folks might be a little bit lazy in noting down the **exact** time down to the second. So choose a crime-type and a suitable time interval (somewhere between a month and 6 months depending on the crime-type) and create a jitter plot of the arrest times during a single hour (like 13-14, for example). So let time run on the $x$-axis and create vertical jitter.",
"_____no_output_____"
]
],
[
[
"df_raw = pd.read_csv('Police_Department_Incident_Reports__Historical_2003_to_May_2018.csv')\ndf_raw.Date = pd.to_datetime(df_raw['Date']) + pd.to_timedelta(df_raw['Time'] + ':00')",
"_____no_output_____"
],
[
"df = df_raw[df_raw.Date.dt.year < 2018]",
"_____no_output_____"
],
[
"from datetime import datetime\nimport numpy as np",
"_____no_output_____"
],
[
"df_plot_1 = df[\n (df.Date >= datetime(2016, 8, 1, 13)) & (df.Date <= datetime(2016, 10, 1, 14)) & (df.Category == 'ASSAULT')]",
"_____no_output_____"
],
[
"df_plot_1 = df_plot_1[(df_plot_1.Date.dt.hour >= 13) & (df_plot_1.Date.dt.hour <= 14)]",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(15,7))\ng = sns.scatterplot(x=df_plot_1['Time'].sort_values(), y=np.random.random(len(df_plot_1)))\nplt.xticks(rotation=85)\nplt.ylim([-1, 2])\nplt.title('Jitterplot of Robberies from August 2016 to October 2016 in the time interval 13:00 to 14:00')\nplt.show()",
"_____no_output_____"
]
],
[
[
"> * Last time, we did lots of bar-plots. Today, we'll play around with histograms (creating two crime-data based versions of the plot-type shown in DAOST **Figure 2-2**). I think the GPS data could be fun to see this way.\n> * This time, pick two crime-types with different geographical patterns **and** a suitable time-interval for each (you want between 1000 and 10000 points in your histogram)\n> * Then take the latitude part of the GPS coordinates for each crime and bin the latitudes so that you have around 50 bins across the city of SF. You can use your favorite method for binning. I like `numpy.histogram`. This function gives you the counts and then you do your own plotting.",
"_____no_output_____"
]
],
[
[
"df_plot_2 = df[(df.Date >= datetime(2016, 1, 1)) & (df.Date <= datetime(2016, 12, 31)) & (df.Category == 'ROBBERY')]\ndf_plot_2['X'].hist(bins=50)\nplt.xlim([-122.5, -122.375])\nplt.title('Histogram of latitude for robbery in 2016')\nplt.show()",
"_____no_output_____"
],
[
"df_plot_3 = df[\n (df.Date >= datetime(2016, 1, 1)) & (df.Date <= datetime(2016, 12, 31)) & (df.Category == 'PROSTITUTION')]\ndf_plot_3['X'].hist(bins=50)\nplt.xlim([-122.5, -122.375])\nplt.title('Histogram of latitude for prostitution in 2016')\nplt.show()",
"_____no_output_____"
]
],
[
[
"\n> * Next up is using the plot-type shown in **Figure 2-4** from DAOST, but with the data you used to create Figure 2.1. To create the kernel density plot, you can either use `gaussian_kde` from `scipy.stats` ([for an example, check out this stackoverflow post](https://stackoverflow.com/questions/4150171/how-to-create-a-density-plot-in-matplotlib)) or you can use [`seaborn.kdeplot`](https://seaborn.pydata.org/generated/seaborn.kdeplot.html).\n> * Now grab 25 random timepoints from the dataset (of 1000-10000 original data) you've just plotted and create a version of Figure 2-4 based on the 25 data points. Does this shed light on why I think KDEs can be misleading?\n>\n> Let's take a break. Get some coffee or water. Stretch your legs. Talk to your friends for a bit. Breathe. Get relaxed so you're ready for the second part of the exercise.",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(1, 1)\n\nsns.kdeplot(df_plot_1.Date, label='KDE', ax=ax)\nsns.scatterplot(x=df_plot_1.Date, y=np.random.random(len(df_plot_1)) / 50, color='red', label='Data points', ax=ax)\n\nplt.xticks(rotation=85)\nplt.title('KDEplot of Robberies in August 2016 with data only in time interval 13:00 to 14:00')\nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"We see that the KDE does say that some data exists in the tails but in fact there is no data at all..",
"_____no_output_____"
],
[
"### *Exercise 3.2*. Ok. Now for more plots 😊\n> * Now we'll work on creating two versions of the plot in **Figure 2-11**, but using the GPS data you used for your version of Figure 2-2. Comment on the result. It is not easy to create this plot from scracth.\n **Hint:** Take a look at the `scipy.stats.probplot` function.\n",
"_____no_output_____"
]
],
[
[
"from scipy.stats import probplot\n\nres = probplot(df_plot_2.X, plot=plt)\n\nslope = res[1][0]\nintercept = res[1][1]\nlinear_func = f\"data*{slope:.3} + {intercept:.3}\"\nplt.text(x=-3, y=-122.375, s=linear_func)\nplt.title('Probability Plot for Robberies in 2016')\nplt.show()",
"_____no_output_____"
],
[
"res = probplot(df_plot_3.X, plot=plt)\n\nslope = res[1][0]\nintercept = res[1][1]\nlinear_func = f\"data*{slope:.3} + {intercept:.3}\"\nplt.text(x=-3, y=-122.375, s=linear_func)\nplt.title('Probability Plot for Prostitution in 2016')\nplt.show()",
"_____no_output_____"
]
],
[
[
"> * OK, we're almost done, but we need some box plots. Here, I'd like you to use the box plots to visualize fluctuations of how many crimes happen per day. We'll use data from the 15 focus crimes defined last week.\n> * For the full time-span of the data, calulate the **number of crimes per day** within each category for the entire duration of the data.\n> * Create a box-and whiskers plot showing the mean, median, quantiles, etc for all 15 crime-types side-by-side. There are many ways to do this. I like to use [matplotlibs's built in functionality](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.boxplot.html), but you can also achieve good results with [seaborn](https://seaborn.pydata.org/generated/seaborn.boxplot.html) or [pandas](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.boxplot.html).\n> * What does this plot reveal that you can't see in the plots from last time?",
"_____no_output_____"
]
],
[
[
"focuscrimes = {'WEAPON LAWS', 'PROSTITUTION', 'DRIVING UNDER THE INFLUENCE', 'ROBBERY', 'BURGLARY', 'ASSAULT',\n 'DRUNKENNESS', 'DRUG/NARCOTIC', 'TRESPASS', 'LARCENY/THEFT', 'VANDALISM', 'VEHICLE THEFT',\n 'STOLEN PROPERTY', 'DISORDERLY CONDUCT'}\n\ndf_raw = pd.read_csv('Police_Department_Incident_Reports__Historical_2003_to_May_2018.csv')\ndf_raw.Date = pd.to_datetime(df_raw['Date']) + pd.to_timedelta(df_raw['Time'] + ':00')\ndf = df_raw[df_raw.Category.isin(focuscrimes)]",
"_____no_output_____"
],
[
"df.groupby([df.Date.dt.floor('d'), df.Category]).count()['PdId'].reset_index().pivot_table('PdId', ['Date'],\n 'Category').boxplot(\n figsize=(15, 10))\nplt.xticks(rotation=90)\nplt.title('Box plots of focus crimes per day')\nplt.show()",
"_____no_output_____"
]
],
[
[
"This time we are shown the outliers as well as the summary statistics for each category",
"_____no_output_____"
],
[
"> * Also I want to show you guys another interesting use of box plots. To get started, let's calculate another average for each focus-crime, namely what time of day the crime happens. So this time, the distribution we want to plot is the average time-of-day that a crime takes place. There are many ways to do this, but let me describe one way to do it.\n * For datapoint, the only thing you care about is the time-of-day, so discard everything else.\n * You also have to deal with the fact that time is annoyingly not divided into nice units that go to 100 like many other numbers. I can think of two ways to deal with this.\n * For each time-of-day, simply encode it as seconds since midnight.\n * Or keep each whole hour, and convert the minute/second count to a percentage of an hour. So 10:15 $\\rightarrow$ 10.25, 8:40 $\\rightarrow$ 8.67, etc.\n * Now you can create box-plots to create an overview of *when various crimes occur*. Note that these plot have quite a different interpretation than ones we created in the previous exercise. Cool, right?",
"_____no_output_____"
]
],
[
[
"df_new = df[['Category', 'Date']]\ndf_new.head()",
"_____no_output_____"
],
[
"df_new['Minutes_since_midnight'] = df_new.Date.dt.hour * 60 + df_new.Date.dt.minute\ndf_new.head()",
"C:\\Users\\moeso\\AppData\\Local\\Temp/ipykernel_30224/1132509496.py:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n df_new['Minutes_since_midnight'] = df_new.Date.dt.hour * 60 + df_new.Date.dt.minute\n"
],
[
"df_new.Date.dt.hour.hist(bins=75)\nplt.title('Histogram of minutes since midnight')\nplt.show()",
"_____no_output_____"
]
],
[
[
"It looks like they are quite inconsistent with logging the time. It usually happens in bulk.",
"_____no_output_____"
]
],
[
[
"# Count occurences for each minute since midnight pr category\ndf_counts = df_new.value_counts(['Minutes_since_midnight', 'Category']).reset_index().rename({0: 'count'}, axis=1)",
"_____no_output_____"
],
[
"N = len(df_new.Minutes_since_midnight.unique())",
"_____no_output_____"
],
[
"df_counts['Avg_pr_minute_since_midnight'] = df_counts.apply(lambda x: x['count'] / N, axis=1)",
"_____no_output_____"
],
[
"df_counts.pivot_table('Minutes_since_midnight', ['Avg_pr_minute_since_midnight'], 'Category').boxplot(figsize=(15,10))\nplt.xticks(rotation=80)\nplt.title('Box plot of which time of day each focus crime happens')\nplt.ylabel('Minutes since midnight')\nplt.show()",
"_____no_output_____"
]
],
[
[
"Pretty cool. Drunkenness happens mostly in the midday! Whilst disorderly conduct happens mostly in the mornings.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
4a190a00e1a320e828f04e73a744f795605dad8f
| 29,954 |
ipynb
|
Jupyter Notebook
|
examples/footprint_cmp.ipynb
|
acpaquette/knoten
|
8341585349d2040cd05026c38ab35d84126b143e
|
[
"Unlicense"
] | 3 |
2021-01-07T05:36:14.000Z
|
2022-03-24T03:54:26.000Z
|
examples/footprint_cmp.ipynb
|
acpaquette/knoten
|
8341585349d2040cd05026c38ab35d84126b143e
|
[
"Unlicense"
] | 54 |
2019-03-27T05:02:07.000Z
|
2022-02-08T13:21:08.000Z
|
examples/footprint_cmp.ipynb
|
acpaquette/knoten
|
8341585349d2040cd05026c38ab35d84126b143e
|
[
"Unlicense"
] | 16 |
2019-03-20T16:16:17.000Z
|
2022-03-10T17:19:32.000Z
| 256.017094 | 26,448 | 0.925185 |
[
[
[
"from plio.io.io_gdal import GeoDataset\nfrom shapely import wkt\nimport knoten.csm as csm\nimport json\nimport matplotlib.pyplot as plt\nimport numpy as np",
"_____no_output_____"
],
[
"cub_loc = '/home/pgiroux/repos/knoten/examples/data/J03_045994_1986_XN_18N282W_sorted.cub'\nold_isd = '/home/pgiroux/Desktop/J03_045994_1986_XN_18N282W_sorted.json'\nnew_isd = '/home/pgiroux/repos/knoten/examples/data/J03_045994_1986_XN_18N282W_sorted.json'\n\n# load ISDs into dictionaries\nold_aledict = json.load(open(old_isd))\nnew_aledict = json.load(open(new_isd))\n\n# make camera models from the ISDs\nold_camera = csm.create_csm(old_isd)\nnew_camera = csm.create_csm(new_isd)\n\n# get lines and samples from ISDs\nolines, osamples = old_aledict[\"image_lines\"], old_aledict[\"image_samples\"]\nnlines, nsamples = new_aledict[\"image_lines\"], new_aledict[\"image_samples\"]\n\n# generate boundaries from ISDs\nold_boundary = csm.generate_boundary((olines, osamples))\nnew_boundary = csm.generate_boundary((nlines, nsamples))\n\n# generate lat lon boundaries\nolons, olats, oalts = csm.generate_latlon_boundary(old_camera, old_boundary, radii=(3396.19,3376.2))\nnlons, nlats, nalts = csm.generate_latlon_boundary(new_camera, new_boundary, radii=(3396.19,3376.2))\n\n# get isis footprint\nisis_wkt = str((wkt.loads(GeoDataset(cub_loc).footprint.ExportToWkt())))\nisis_geom = wkt.loads(isis_wkt)\nisis_lon, isis_lat = isis_geom.geoms[0].exterior.coords.xy\n\n# Convert ISIS longitude to -180/180\nisis_lon_360 = np.asarray(isis_lon)\nisis_lon_180 = np.mod(isis_lon_360 - 180.0, 360.0) - 180.0\n\n# plot footprints\nplt.rcParams[\"figure.figsize\"] = [5, 5]\nplt.axes().set_aspect('equal','datalim')\nplt.xlabel(\"Longitude (deg)\")\nplt.ylabel(\"Latitude (deg)\")\nplt.title(\"Footprint Comparisons\")\noldale_plot, = plt.plot(olons, olats, 'r')\noldale_plot.set_label(\"ALE 0.0.4\")\nnewale_plot, = plt.plot(nlons, nlats, 'b', linewidth=3.0)\nisis_plot, = plt.plot(isis_lon_180, isis_lat, 'y', linewidth=1.5)\n\nisis_plot.set_label('ISIS CUBE')\nnewale_plot.set_label(\"ALE 0.2.0\")\nplt.legend()\n\nplt.show()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code"
]
] |
4a190f975f50987848e6e13739d2b29dc20668c1
| 14,530 |
ipynb
|
Jupyter Notebook
|
class-2020-04-24/Spark Introduction.ipynb
|
spu-bigdataanalytics-201/class-materials
|
3a7c84d851c4aadd702420e7d4a5d0e80ada0387
|
[
"MIT"
] | null | null | null |
class-2020-04-24/Spark Introduction.ipynb
|
spu-bigdataanalytics-201/class-materials
|
3a7c84d851c4aadd702420e7d4a5d0e80ada0387
|
[
"MIT"
] | 3 |
2021-06-08T21:12:36.000Z
|
2022-03-12T00:22:27.000Z
|
class-2020-04-24/Spark Introduction.ipynb
|
spu-bigdataanalytics-201/class-materials
|
3a7c84d851c4aadd702420e7d4a5d0e80ada0387
|
[
"MIT"
] | 1 |
2020-04-24T23:48:50.000Z
|
2020-04-24T23:48:50.000Z
| 33.712297 | 189 | 0.439573 |
[
[
[
"## Using Spark in Google Colab",
"_____no_output_____"
]
],
[
[
"# download and Java and Spark\n! apt-get install openjdk-8-jdk-headless -qq > /dev/null\n! wget -q https://archive.apache.org/dist/spark/spark-2.4.5/spark-2.4.5-bin-hadoop2.7.tgz\n! tar xf spark-2.4.5-bin-hadoop2.7.tgz\n! pip install -q findspark",
"_____no_output_____"
],
[
"# set the environment variables for spark\nimport os\nos.environ[\"JAVA_HOME\"] = \"/usr/lib/jvm/java-8-openjdk-amd64\"\nos.environ[\"SPARK_HOME\"] = \"/content/spark-2.4.5-bin-hadoop2.7\"",
"_____no_output_____"
],
[
"! python -m pip install pyspark",
"Collecting pyspark\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/9a/5a/271c416c1c2185b6cb0151b29a91fff6fcaed80173c8584ff6d20e46b465/pyspark-2.4.5.tar.gz (217.8MB)\n\u001b[K |████████████████████████████████| 217.8MB 65kB/s \n\u001b[?25hCollecting py4j==0.10.7\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/e3/53/c737818eb9a7dc32a7cd4f1396e787bd94200c3997c72c1dbe028587bd76/py4j-0.10.7-py2.py3-none-any.whl (197kB)\n\u001b[K |████████████████████████████████| 204kB 51.4MB/s \n\u001b[?25hBuilding wheels for collected packages: pyspark\n Building wheel for pyspark (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for pyspark: filename=pyspark-2.4.5-py2.py3-none-any.whl size=218257927 sha256=68313c482b7fd80d18475a57ddc198080791bdf0f4e54b2793f6c5283c58cf9a\n Stored in directory: /root/.cache/pip/wheels/bf/db/04/61d66a5939364e756eb1c1be4ec5bdce6e04047fc7929a3c3c\nSuccessfully built pyspark\nInstalling collected packages: py4j, pyspark\nSuccessfully installed py4j-0.10.7 pyspark-2.4.5\n"
]
],
[
[
"### Use of Spark",
"_____no_output_____"
]
],
[
[
"from pyspark.sql import SparkSession",
"_____no_output_____"
],
[
"spark = SparkSession.builder \\\n .master(\"local[*]\") \\\n .appName(\"Learning_Spark\") \\\n .getOrCreate()",
"_____no_output_____"
],
[
"spark.version",
"_____no_output_____"
],
[
"df = spark.read.csv('sample_data/california_housing_test.csv')",
"_____no_output_____"
],
[
"df.printSchema()",
"root\n |-- _c0: string (nullable = true)\n |-- _c1: string (nullable = true)\n |-- _c2: string (nullable = true)\n |-- _c3: string (nullable = true)\n |-- _c4: string (nullable = true)\n |-- _c5: string (nullable = true)\n |-- _c6: string (nullable = true)\n |-- _c7: string (nullable = true)\n |-- _c8: string (nullable = true)\n\n"
],
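[
"# Hedged aside (not in the original notebook): the _c0, _c1, ... names above come\n# from reading the CSV without options; header=True uses the first row as column\n# names and inferSchema=True parses numeric types instead of strings.\ndf_named = spark.read.csv('sample_data/california_housing_test.csv', header=True, inferSchema=True)\ndf_named.printSchema()",
"_____no_output_____"
],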
[
"# action\ndf.select('_c0', '_c1').take(2)",
"_____no_output_____"
],
[
"# transformation\ndf.select('_c0', '_c1')",
"_____no_output_____"
],
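[
"# Added illustration: transformations are lazy; explain() prints the query plan\n# without running it, whereas actions such as take()/show() trigger execution.\ndf.select('_c0', '_c1').explain()",
"_____no_output_____"
],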
[
"df.select('_c0', '_c1').show()",
"+-----------+---------+\n| _c0| _c1|\n+-----------+---------+\n| longitude| latitude|\n|-122.050000|37.370000|\n|-118.300000|34.260000|\n|-117.810000|33.780000|\n|-118.360000|33.820000|\n|-119.670000|36.330000|\n|-119.560000|36.510000|\n|-121.430000|38.630000|\n|-120.650000|35.480000|\n|-122.840000|38.400000|\n|-118.020000|34.080000|\n|-118.240000|33.980000|\n|-119.120000|35.850000|\n|-121.930000|37.250000|\n|-117.030000|32.970000|\n|-117.970000|33.730000|\n|-117.990000|33.810000|\n|-120.810000|37.530000|\n|-121.200000|38.690000|\n|-118.880000|34.210000|\n+-----------+---------+\nonly showing top 20 rows\n\n"
],
[
"df.show()",
"+-----------+---------+------------------+-----------+--------------+-----------+----------+-------------+------------------+\n| _c0| _c1| _c2| _c3| _c4| _c5| _c6| _c7| _c8|\n+-----------+---------+------------------+-----------+--------------+-----------+----------+-------------+------------------+\n| longitude| latitude|housing_median_age|total_rooms|total_bedrooms| population|households|median_income|median_house_value|\n|-122.050000|37.370000| 27.000000|3885.000000| 661.000000|1537.000000|606.000000| 6.608500| 344700.000000|\n|-118.300000|34.260000| 43.000000|1510.000000| 310.000000| 809.000000|277.000000| 3.599000| 176500.000000|\n|-117.810000|33.780000| 27.000000|3589.000000| 507.000000|1484.000000|495.000000| 5.793400| 270500.000000|\n|-118.360000|33.820000| 28.000000| 67.000000| 15.000000| 49.000000| 11.000000| 6.135900| 330000.000000|\n|-119.670000|36.330000| 19.000000|1241.000000| 244.000000| 850.000000|237.000000| 2.937500| 81700.000000|\n|-119.560000|36.510000| 37.000000|1018.000000| 213.000000| 663.000000|204.000000| 1.663500| 67000.000000|\n|-121.430000|38.630000| 43.000000|1009.000000| 225.000000| 604.000000|218.000000| 1.664100| 67000.000000|\n|-120.650000|35.480000| 19.000000|2310.000000| 471.000000|1341.000000|441.000000| 3.225000| 166900.000000|\n|-122.840000|38.400000| 15.000000|3080.000000| 617.000000|1446.000000|599.000000| 3.669600| 194400.000000|\n|-118.020000|34.080000| 31.000000|2402.000000| 632.000000|2830.000000|603.000000| 2.333300| 164200.000000|\n|-118.240000|33.980000| 45.000000| 972.000000| 249.000000|1288.000000|261.000000| 2.205400| 125000.000000|\n|-119.120000|35.850000| 37.000000| 736.000000| 166.000000| 564.000000|138.000000| 2.416700| 58300.000000|\n|-121.930000|37.250000| 36.000000|1089.000000| 182.000000| 535.000000|170.000000| 4.690000| 252600.000000|\n|-117.030000|32.970000| 16.000000|3936.000000| 694.000000|1935.000000|659.000000| 4.562500| 231200.000000|\n|-117.970000|33.730000| 27.000000|2097.000000| 325.000000|1217.000000|331.000000| 5.712100| 222500.000000|\n|-117.990000|33.810000| 42.000000| 161.000000| 40.000000| 157.000000| 50.000000| 2.200000| 153100.000000|\n|-120.810000|37.530000| 15.000000| 570.000000| 123.000000| 189.000000|107.000000| 1.875000| 181300.000000|\n|-121.200000|38.690000| 26.000000|3077.000000| 607.000000|1603.000000|595.000000| 2.717400| 137500.000000|\n|-118.880000|34.210000| 26.000000|1590.000000| 196.000000| 654.000000|199.000000| 6.585100| 300000.000000|\n+-----------+---------+------------------+-----------+--------------+-----------+----------+-------------+------------------+\nonly showing top 20 rows\n\n"
],
[
"df.groupBy(\"_c0\") \\\n.count() \\\n.orderBy(\"count\", ascending=False) \\\n.show(10)",
"+-----------+-----+\n| _c0|count|\n+-----------+-----+\n|-118.210000| 26|\n|-118.260000| 26|\n|-118.280000| 25|\n|-118.290000| 25|\n|-118.270000| 25|\n|-118.300000| 24|\n|-118.140000| 23|\n|-118.350000| 22|\n|-118.020000| 21|\n|-118.330000| 21|\n+-----------+-----+\nonly showing top 10 rows\n\n"
],
[
"",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a191b2dd97b17edf3ab6e211d382d5c7228f129
| 4,907 |
ipynb
|
Jupyter Notebook
|
Calculus_Homework/WWB15.3.ipynb
|
NSC9/Sample_of_Work
|
8f8160fbf0aa4fd514d4a5046668a194997aade6
|
[
"MIT"
] | null | null | null |
Calculus_Homework/WWB15.3.ipynb
|
NSC9/Sample_of_Work
|
8f8160fbf0aa4fd514d4a5046668a194997aade6
|
[
"MIT"
] | null | null | null |
Calculus_Homework/WWB15.3.ipynb
|
NSC9/Sample_of_Work
|
8f8160fbf0aa4fd514d4a5046668a194997aade6
|
[
"MIT"
] | null | null | null | 19.947154 | 130 | 0.451396 |
[
[
[
"from IPython.display import Image\nfrom IPython.core.display import HTML \nfrom sympy import *\nImage(url= \"https://i.imgur.com/f9pPzLu.png\")",
"_____no_output_____"
],
[
"L = Function(\"L\")\nf = Function(\"f\")\nx,a = symbols('x a')\nL(x)",
"_____no_output_____"
],
[
"Eq(L(x) , f(a) + Derivative(f(a),a)*(x-a))",
"_____no_output_____"
],
[
"expr = 1/(sqrt(5-x))\nx1 = 0\nEq(f(x), expr)",
"_____no_output_____"
],
[
"eq1 = Eq(f(x), expr).subs(x,x1)\neq1",
"_____no_output_____"
],
[
"fa = eq1.rhs\ndef f(x):\n return expr\ndf = diff(f(x),x)\neq2 = Eq(Derivative(f(x)),df).subs(x,x1)\neq2",
"_____no_output_____"
],
[
"dfa= eq2.rhs\nEq(L(x) , fa+ dfa*(x-x1)).evalf()",
"_____no_output_____"
],
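[
"# Optional sanity check (added for illustration): near a = 0 the linearization\n# should track f(x) closely, and the error should grow away from a.\nfor xv in [0.1, 1, 4]:\n    exact = f(x).subs(x, xv).evalf()\n    approx = (fa + dfa*(xv - x1)).evalf()\n    print(xv, exact, approx, exact - approx)",
"_____no_output_____"
],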
[
"print(Eq(L(x) , fa+ dfa*(x-x1)).evalf().rhs)",
"0.0447213595499958*x + 0.447213595499958\n"
],
[
"Image(url= \"https://i.imgur.com/tmicKO3.png\")",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a19285ff48f3ee72d5e090ebb1db06c982e308b
| 9,242 |
ipynb
|
Jupyter Notebook
|
2021/Singles/PAE-NumPy-and-Matplotlib-part1.ipynb
|
Muramatsu2602/python-bootcamp
|
2f5dc35afbe6bfba553c8c667f73c70f21a4a69e
|
[
"MIT"
] | null | null | null |
2021/Singles/PAE-NumPy-and-Matplotlib-part1.ipynb
|
Muramatsu2602/python-bootcamp
|
2f5dc35afbe6bfba553c8c667f73c70f21a4a69e
|
[
"MIT"
] | null | null | null |
2021/Singles/PAE-NumPy-and-Matplotlib-part1.ipynb
|
Muramatsu2602/python-bootcamp
|
2f5dc35afbe6bfba553c8c667f73c70f21a4a69e
|
[
"MIT"
] | null | null | null | 9,242 | 9,242 | 0.601385 |
[
[
[
"# Numpy",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom numpy import *",
"_____no_output_____"
]
],
[
[
"A estrutura de dados base do *numpy* sao os **arrays**",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\n# criando um array unidimensional a partir de uma lista\nlst = [1,3,5,7,9,10]\na1d = np.array(lst)\nprint(a1d)\n\nprint(lst)",
"[ 1 3 5 7 9 10]\n[1, 3, 5, 7, 9, 10]\n"
],
[
"b1d = np.zeros((8))\nprint('ald=', b1d)\n\nb1d = np.ones((8))\nprint('bld=', b1d)\n\nb1d = np.arange((8))\nprint('cld=', b1d)\n\nb1d = np.linspace(1,2,5)\nprint('dld=', b1d)\n",
"ald= [0. 0. 0. 0. 0. 0. 0. 0.]\nbld= [1. 1. 1. 1. 1. 1. 1. 1.]\ncld= [0 1 2 3 4 5 6 7]\ndld= [1. 1.25 1.5 1.75 2. ]\n"
]
],
[
[
"## Criando arrays bidimensionais a partir de uma lista de listas",
"_____no_output_____"
]
],
[
[
"a2d = np.array([[1,3,5,7,9],\n [2,4,6,10,12],\n [0,1,2,3,4]])\n\na2d",
"_____no_output_____"
],
[
"import numpy as np\n\nb2d = np.zeros((5,10))\nprint(b2d)\n\nc2d = np.identity(4)\nprint(c2d)",
"[[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]\n[[1. 0. 0. 0.]\n [0. 1. 0. 0.]\n [0. 0. 1. 0.]\n [0. 0. 0. 1.]]\n"
]
],
[
[
"Numpy arrays sao objetos chamados _ndarrays_ e possuem diversos **atributos**:\n\n- _ndarray.ndim_ - numero de eixos (dimensoes do array)\n- _ndarray.shape_ - uma tupla de inteiros indicando o tamanho do array em cada dimensao\n- _ndarray.size_ - o numero total de elementos no array\n- _ndarray.dtype_ - tipo dos elementos no array\n- _ndarray.itemsize_ 0 o tamanho em bytes de cada elemento do array\n- _ndarray.data_ - o buffer de memoria contendo os elementos do array",
"_____no_output_____"
]
],
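[
[
"# Illustrative sketch of the remaining attributes (example array chosen arbitrarily)\nm = np.arange(12, dtype=np.float64).reshape(3, 4)\nprint('size:', m.size)          # 12 elements in total\nprint('dtype:', m.dtype)        # float64\nprint('itemsize:', m.itemsize)  # 8 bytes per float64 element",
"_____no_output_____"
]
],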
[
[
"a = np.zeros((5))\nb = np.zeros((5,1))\nprint('a.shape', a.shape)\nprint('b.shape', b.shape)\nprint('a.ndim', a.ndim)\nprint('b.ndim', b.ndim)",
"a.shape (5,)\nb.shape (5, 1)\na.ndim 1\nb.ndim 2\n"
]
],
[
[
"## Percorrendo elementos com um *loop*\nA iteracao eh feita por linhas , se o que se busca sao os elementos, deve-se utilizar um laco duplo ou entao a versao 'flat' do array",
"_____no_output_____"
]
],
[
[
"# percorrendo as linhas\nfor i,row in enumerate(a2d):\n print('linha ',i, ' = ', row)\n\nprint(5*'--', 'iterando o array')\n#perorrendo elementos\nfor i,r in enumerate(a2d):\n for j,e in enumerate(r):\n print('elemento ', i , ' ', j, ' = ', e)\n\nprint(5*'--', 'utilizando indices')\n#pode-se tamem utilizar indices diretamente\nfor i in range(a2d.shape[0]):\n for j in range(a2d.shape[1]):\n print('elemento ',i, ' ', j, ' = ', a2d[i,j])\n\nprint(5*'--', 'utilizando flat')",
"linha 0 = [1 3 5 7 9]\nlinha 1 = [ 2 4 6 10 12]\nlinha 2 = [0 1 2 3 4]\n---------- iterando o array\nelemento 0 0 = 1\nelemento 0 1 = 3\nelemento 0 2 = 5\nelemento 0 3 = 7\nelemento 0 4 = 9\nelemento 1 0 = 2\nelemento 1 1 = 4\nelemento 1 2 = 6\nelemento 1 3 = 10\nelemento 1 4 = 12\nelemento 2 0 = 0\nelemento 2 1 = 1\nelemento 2 2 = 2\nelemento 2 3 = 3\nelemento 2 4 = 4\n---------- utilizando indices\nelemento 0 0 = 1\nelemento 0 1 = 3\nelemento 0 2 = 5\nelemento 0 3 = 7\nelemento 0 4 = 9\nelemento 1 0 = 2\nelemento 1 1 = 4\nelemento 1 2 = 6\nelemento 1 3 = 10\nelemento 1 4 = 12\nelemento 2 0 = 0\nelemento 2 1 = 1\nelemento 2 2 = 2\nelemento 2 3 = 3\nelemento 2 4 = 4\n---------- utilizando flat\n"
]
],
[
[
"## Slicing arrays\nA melhor forma de se percorrer um array eh por meio de _slicing_ , evitando uso de loops for, que sao computacionalmente muito pesados.\n\n- Array slicing funciona como em listas, mas em multiplas dimensoes\n- Omitir um indice corresponde a recuperar toda a dimensao omitida\n- Um slice eh uma visao (*VIEW*) do array original (similar a uma referencia), isto eh, o dado nao eh copiado",
"_____no_output_____"
]
],
[
[
"a2d = np.array([[1,3,5,7,9],\n [2,4,6,10,12],\n [0,1,2,3,4]])\n\n\n# slicing the lines\nprint('a2d[1, :] - Recupera a linha de indice 1 (equivalent to a2d[1])\\n', a2d[1, :])\n\n# slicing the columns\nprint('a2d[:, 2] - Recupera a coluna de indice 2\\n', a2d[:, 2])\n\n# slicing the blocks\nprint('a2d[1:, 2:5] - Recupera o bloco a partir da linha de indice 1 e colunas 2,3 e 4\\n', a2d[1:, 2:5])\n",
"a2d[1, :] - Recupera a linha de indice 1 (equivalent to a2d[1])\n [ 2 4 6 10 12]\na2d[:, 2] - Recupera a coluna de indice 2\n [5 6 2]\na2d[1:, 2:5] - Recupera o bloco a partir da linha de indice 1 e colunas 2,3 e 4\n [[ 6 10 12]\n [ 2 3 4]]\n"
]
],
[
[
"## Metodos flatten, ravel e reshape\n\n- O metodo reshape permite reformatar o array mofidicando o numero de linhas e colunas, porem, a nova 'shape' deve possuir o mesmo numero de elementos do array original\n\n- O metodo ravel concatena as linhas da matriz em um array unidimensional\n\n- O Metodo flatten tambem concatena as linhas da matriz em um array unidimensional, porem, faz uma copia dos elementos. O Metodo ravel gera uma **view**, portanto se algum elemento for modificado, o array original tambem o eh\n",
"_____no_output_____"
]
],
[
[
"# criando um array randomico unidimensional e dps tranformando ele em uma matriz 3x3\na = np.arange(9).reshape((3,3))\nprint('a:\\n', a)\n\n# concatenando as linhas da matriz em um vetor\nb = a.ravel()\nprint('b: \\n', b)\n\n",
"a:\n [[0 1 2]\n [3 4 5]\n [6 7 8]]\nb: \n [0 1 2 3 4 5 6 7 8]\n"
],
[
"x = np.arange(18).reshape(3,6)\n\n# calculando uma mascara booleana onde o valor True\n# corresponde aos elementos maiores que 7\nmask = (x > 7)\n\n# mascara booleana da matriz para a condicao dada\nprint(mask)\n\n# recuperando apenas os valores que sao aprovados pela mascara\nprint(x[mask])\n\n# Zerando apenas os elementos que passaram na mascara\nx[mask] = 0\nprint(x)\n\n",
"[[False False False False False False]\n [False False True True True True]\n [ True True True True True True]]\n[ 8 9 10 11 12 13 14 15 16 17]\n[[0 1 2 3 4 5]\n [6 7 0 0 0 0]\n [0 0 0 0 0 0]]\n"
]
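,
 [
 "# A minimal sketch of the view-vs-copy behaviour described above (assumption: purely illustrative)\nm = np.arange(9).reshape((3,3))\nr = m.ravel()       # ravel returns a view\nr[0] = 99           # modifying the view...\nprint(m[0, 0])      # ...also changes the original array\nf = m.flatten()     # flatten returns a copy\nf[1] = -1\nprint(m[0, 1])      # the original array is unchanged",
 "_____no_output_____"
 ]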
],
[
[
"## Regarding 'Views'\n\n- Uma **view** eh criada ao fatiar (sliccing) o array\n- Uma **view** eh uma referencia a uma parte de um array\n- Alterar elementos da **view** afeta o array original\n- Se necessario, voce pode explicitamente fazer uma copia utilizando o metodo **copy**\n",
"_____no_output_____"
],
[
"## I/O with Numpy\nNumpy fornece metodos para ler e escrever arrays em arquivos. A sintaxe basica para leitura de arrays a partir de um arquivo eh",
"_____no_output_____"
]
],
[
[
"nome_array = np.loadtxt('nome_do_arquivo')",
"_____no_output_____"
]
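,
 [
 "# A minimal round-trip sketch (assumption: 'example.txt' is just a scratch file)\narr = np.linspace(0, 1, 5)\nnp.savetxt('example.txt', arr)      # write the array as plain text\nloaded = np.loadtxt('example.txt')  # read it back\nprint(loaded)",
 "_____no_output_____"
 ]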
],
[
[
"---\n\n# Broadcasting",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a192c9b826baa4440e3540c93c31f81bc580573
| 25,631 |
ipynb
|
Jupyter Notebook
|
code/displacement-metrics/1.daily_modal_metrics_computation.ipynb
|
shikharmehra/afghanistan-internal-displacement
|
a98877b14bbd2e6fefb75e872bbfbcbb4c3eb89e
|
[
"CC-BY-4.0"
] | null | null | null |
code/displacement-metrics/1.daily_modal_metrics_computation.ipynb
|
shikharmehra/afghanistan-internal-displacement
|
a98877b14bbd2e6fefb75e872bbfbcbb4c3eb89e
|
[
"CC-BY-4.0"
] | null | null | null |
code/displacement-metrics/1.daily_modal_metrics_computation.ipynb
|
shikharmehra/afghanistan-internal-displacement
|
a98877b14bbd2e6fefb75e872bbfbcbb4c3eb89e
|
[
"CC-BY-4.0"
] | null | null | null | 40.684127 | 634 | 0.497679 |
[
[
[
"# Add user specific python libraries to path\nimport sys\nsys.path.insert(0, \"/home/smehra/local-packages\")\nprint(sys.path)",
"['/home/smehra/local-packages', '', '/home/smehra/.conda/envs/smehra_py2/lib/python27.zip', '/home/smehra/.conda/envs/smehra_py2/lib/python2.7', '/home/smehra/.conda/envs/smehra_py2/lib/python2.7/plat-linux2', '/home/smehra/.conda/envs/smehra_py2/lib/python2.7/lib-tk', '/home/smehra/.conda/envs/smehra_py2/lib/python2.7/lib-old', '/home/smehra/.conda/envs/smehra_py2/lib/python2.7/lib-dynload', '/home/smehra/.local/lib/python2.7/site-packages', '/home/smehra/.conda/envs/smehra_py2/lib/python2.7/site-packages', '/home/smehra/.conda/envs/smehra_py2/lib/python2.7/site-packages/IPython/extensions', '/home/smehra/.ipython']\n"
],
[
"import geopandas as gpd\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport pyspark.sql.functions as F\n",
"_____no_output_____"
],
[
"import os\nos.environ[\"SPARK_CONF_DIR\"] = \"/data/tmp/spark/conf\"",
"_____no_output_____"
],
[
"import pyspark\nimport random\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql import HiveContext\n\nconfig = pyspark.SparkConf().setAll([('spark.ui.port', 4050), \n ('spark.ui.enabled', True),\n \n # if running in local mode, driver will be only executor\n # hence, give driver as much memory as possible if running in local mode\n ('spark.driver.memory','50g'), \n \n # set up executor config if running in cluster or client mode\n #('spark.executor.instances', '5'), \n #('spark.executor.cores', '5'), \n #('spark.executor.memory', '5g'), \n #('spark.executor.memoryOverhead', '500m'),\n \n # more partitions means smaller partition size per task\n # hence, would reduce memory load\n ('spark.sql.shuffle.partitions', '1000'),\n \n # increase max result size if you are \"collecting\" big dataset \n # driver will need more memory to collect\n #('spark.driver.maxResultSize', '2g'),\n \n # set location spark should use for temporary data\n ('spark.local.dir', '/data/tmp/smehra/tmp'),\n # Set location of hive database\n ('spark.sql.warehouse.dir', '/data/tmp/hive_warehouse'),\n # Add mysql connector jar to use mysql as metastore service\n ('spark.jars', '/data/tmp/spark/jars/mysql-connector-java-5.1.30-bin.jar'),\n \n # KryoSerializer is faster and more compact than the Java default serializer.\n ('spark.serializer', 'org.apache.spark.serializer.KryoSerializer'),\n \n # G1GC overcomes the latency and throughput limitations with the old garbage collectors.\n ('spark.executor.extraJavaOptions','-XX:+UseG1GC')])\n\nspark = SparkSession.builder \\\n .enableHiveSupport() \\\n .config(conf=config) \\\n .master(\"local[30]\") \\\n .appName(\"smehra_afgh_project\") \\\n .getOrCreate()\n\n# Get the Hive Context\nhive = HiveContext(spark.sparkContext)\n\nspark.sparkContext._conf.getAll()\n",
"_____no_output_____"
],
[
"# Read from Hive\nraw_data_phone_calls = hive.sql('SELECT * FROM afghanistan.raw_data_phone_calls')\nraw_data_phone_calls.show(5)",
"+----------------+--------+---------+----------------+---------+---------+-----------+-------------------+---------+-------------+---------+---------------+----------------+----------+------+---------+--------+---------+\n| phoneHash1|numtype1|ctrycode1| phoneHash2| numtype2|ctrycode2|interaction| datetime|yearmonth|call_duration|call_cost| antenna_id|charged_duration|product_id|f_type|f_subtype|pay_type|subcos_id|\n+----------------+--------+---------+----------------+---------+---------+-----------+-------------------+---------+-------------+---------+---------------+----------------+----------+------+---------+--------+---------+\n|7orJ23R7GEYKqV1b| mobile| 93|6LzVvQam1jvQMdG5|shortcode| 93| call|2014-10-04 20:22:24| 2014-10| 9| 0.0|412204210242333| 0| 15| SC| SC| 0| 400003|\n|9zgKqvx9LoxjQWve| mobile| 93|LNyd29oEmzOdl8zP| mobile| 93| call|2014-10-04 20:22:24| 2014-10| 4| 0.25|412204110520835| 15| 507000| ONNET| ONNET| 0| 400001|\n|305xqe0kZ0RVlXGg| mobile| 93|mOKlk5Aw1a4ME2p1| intl| 971| call|2014-10-04 20:22:24| 2014-10| 740| 65.0|412203810238053| 780| 504009| INTL| INTL| 0| 400007|\n|y4rZqRp9JabjQDMK| mobile| 93|J305xqe80PglXGgW|shortcode| 93| call|2014-10-04 20:22:24| 2014-10| 193| 0.0|412201010628586| 0| 15| SC| SC| 0| 400003|\n|edyL2yx10Vz0qjA8| mobile| 93|3kEwqYmeW535qpJN| mobile| 93| call|2014-10-04 20:22:24| 2014-10| 185| 11.38|412204110420356| 195| 701006|OFFNET| ETAFG| 0| 400001|\n+----------------+--------+---------+----------------+---------+---------+-----------+-------------------+---------+-------------+---------+---------------+----------------+----------+------+---------+--------+---------+\nonly showing top 5 rows\n\n"
],
[
"# Our methodology assumes a day starts at 5am and ends on 5am next day\n# Hence, we calculate \"effective\" date and time respectively.\n\n# Example:\n# Actual datetime: 2013-04-21 3.40am\n# Effective date: 2013-04-20\n# Effective hour: 23nd hour of the day\n\nraw_data_phone_calls_with_effective_time = raw_data_phone_calls.withColumn('effective_datetime', F.col('datetime') - F.expr(\"INTERVAL 5 HOURS\"))\n",
"_____no_output_____"
],
[
"# keep and reformat columns needed for migration detection algorithm\n\nraw_data_for_migration_detection = raw_data_phone_calls_with_effective_time.withColumn('date', F.date_format(F.col(\"effective_datetime\"), \"YYYYMMdd\"))\n \nraw_data_for_migration_detection = raw_data_for_migration_detection.select(F.col('phoneHash1').alias(\"user_id\"), \n F.col('date'), \n F.col(\"antenna_id\"))\nraw_data_for_migration_detection.show(5)\n",
"+----------------+--------+---------------+\n| user_id| date| antenna_id|\n+----------------+--------+---------------+\n|7orJ23R7GEYKqV1b|20141004|412204210242333|\n|9zgKqvx9LoxjQWve|20141004|412204110520835|\n|305xqe0kZ0RVlXGg|20141004|412203810238053|\n|y4rZqRp9JabjQDMK|20141004|412201010628586|\n|edyL2yx10Vz0qjA8|20141004|412204110420356|\n+----------------+--------+---------------+\nonly showing top 5 rows\n\n"
],
[
"# remove null and invalid values\nraw_data_for_migration_detection_filtered = raw_data_for_migration_detection.filter(raw_data_for_migration_detection.user_id.isNotNull()\n & (raw_data_for_migration_detection.user_id != \"-99\")\n & raw_data_for_migration_detection.date.isNotNull()\n & (raw_data_for_migration_detection.date != \"-99\")\n & raw_data_for_migration_detection.antenna_id.isNotNull()\n & (raw_data_for_migration_detection.antenna_id != -99))\n",
"_____no_output_____"
],
[
"# Load tower to antenna mapping data\ntower_to_antenna_map = spark.read.csv('/data/projects/displacement_afghanistan/data/Aggregated_Groups/TowerDetails_WithGroupIDs_UTM42N.csv', header = True, inferSchema=True)\ntower_to_antenna_map = tower_to_antenna_map.select(F.col('Final_Agg_GroupID').alias(\"tower_group_id\"), F.col('callingcellid').alias(\"antenna_id\"))\ntower_to_antenna_map.show(5)\n",
"+--------------+---------------+\n|tower_group_id| antenna_id|\n+--------------+---------------+\n| 856|412200000000000|\n| 856|412200010229701|\n| 856|412200010229702|\n| 856|412200010229703|\n| 856|412200010229704|\n+--------------+---------------+\nonly showing top 5 rows\n\n"
],
[
"# join daily modal location with tower to district mapping\nraw_data_with_tower_groups = raw_data_for_migration_detection_filtered.join(tower_to_antenna_map, \n raw_data_for_migration_detection_filtered.antenna_id == tower_to_antenna_map.antenna_id, \n how = 'left').select(raw_data_for_migration_detection_filtered['*'], tower_to_antenna_map['tower_group_id'])\nraw_data_with_tower_groups.show(5)\n",
"+----------------+--------+---------------+--------------+\n| user_id| date| antenna_id|tower_group_id|\n+----------------+--------+---------------+--------------+\n|7orJ23R7GEYKqV1b|20141004|412204210242333| 345|\n|9zgKqvx9LoxjQWve|20141004|412204110520835| 123|\n|305xqe0kZ0RVlXGg|20141004|412203810238053| 676|\n|y4rZqRp9JabjQDMK|20141004|412201010628586| 1054|\n|edyL2yx10Vz0qjA8|20141004|412204110420356| 69|\n+----------------+--------+---------------+--------------+\nonly showing top 5 rows\n\n"
]
],
[
[
"## User Daily Unique Towers",
"_____no_output_____"
]
],
[
[
"raw_data_with_tower_locations = raw_data_with_tower_groups.select(F.col('user_id'), \n F.col(\"date\"),\n F.col(\"tower_group_id\").alias('location'))\nraw_data_with_tower_locations.show(5)\n",
"+----------------+--------+--------+\n| user_id| date|location|\n+----------------+--------+--------+\n|7orJ23R7GEYKqV1b|20141004| 345|\n|9zgKqvx9LoxjQWve|20141004| 123|\n|305xqe0kZ0RVlXGg|20141004| 676|\n|y4rZqRp9JabjQDMK|20141004| 1054|\n|edyL2yx10Vz0qjA8|20141004| 69|\n+----------------+--------+--------+\nonly showing top 5 rows\n\n"
],
[
"## drop duplicates and sort data\nraw_data_with_tower_groups_deduped = raw_data_with_tower_locations.dropDuplicates()\nraw_data_with_tower_groups_deduped_sorted = raw_data_with_tower_groups_deduped.sort([\"user_id\", \"date\", \"location\"])\n\n# save in hive\nraw_data_with_tower_groups_deduped_sorted.write.saveAsTable('afghanistan.user_daily_unique_towers_long')\n\nuser_daily_unique_towers_long = hive.sql('SELECT * FROM afghanistan.user_daily_unique_towers_long')",
"_____no_output_____"
],
[
"# convert long from to wide form dataset\n# one row per user\n# one column per day\n# value of each cell represent all towers user used on that day\n\n# note: collect_set dedups location values.\n# use collect_list if you need *all* location values for a day for a user\nuser_daily_unique_towers_wide = user_daily_unique_towers_long.groupby('user_id').pivot('date').agg(F.collect_set('location'))\nuser_daily_unique_towers_wide.show(100)\n",
"_____no_output_____"
],
[
"# a list of day series i.e from 20130101 to 20171231\ndaySeriesList = set()\n\nfor year in range(2013, 2018):\n for month in [1, 3, 5, 7, 8, 10, 12]:\n for day in range(1, 32):\n daySeriesList.add(str(year) + (\"%02d\"%month) + (\"%02d\"%day))\n \n for month in [4, 6, 9, 11]:\n for day in range(1, 31):\n daySeriesList.add(str(year) + (\"%02d\"%month) + (\"%02d\"%day))\n \n for day in range(1, 30):\n if(day == 29):\n if(year%4 == 0):\n daySeriesList.add(str(year) + \"02\" + (\"%02d\"%day))\n else:\n daySeriesList.add(str(year) + \"02\" + (\"%02d\"%day))\n",
"_____no_output_____"
],
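 [
 "# A simpler equivalent sketch using pandas (assumption: same YYYYMMDD string format as above)\nday_series_alt = set(pd.date_range('2013-01-01', '2017-12-31').strftime('%Y%m%d'))\nassert day_series_alt == daySeriesList  # both constructions agree, including leap days",
 "_____no_output_____"
 ],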
[
"# add empty columns for days for which we did not have any users making any calls\n\n# existing list of columns in user_daily_unique_towers_wide table\nexistingDaySeriesColumns = user_daily_unique_towers_wide.columns\nexistingDaySeriesColumns.remove('user_id')\nexistingDaySeriesColumns = set(existingDaySeriesColumns)\n\nmissingColumns = daySeriesList.difference(existingDaySeriesColumns)\nprint('missing columns: ', missingColumns)\n\nfor newColumn in missingColumns:\n user_daily_unique_towers_wide = user_daily_unique_towers_wide.withColumn(str(newColumn), F.array())\n print('added column: ', newColumn)",
"_____no_output_____"
],
[
"# save in hive\nuser_daily_unique_towers_wide.write.saveAsTable('afghanistan.user_daily_unique_towers_wide')",
"_____no_output_____"
]
],
[
[
"## User Daily Unique Districts",
"_____no_output_____"
]
],
[
[
"# Load tower to district mapping data\ntower_to_district_map = spark.read.csv('/data/projects/displacement_afghanistan/data/Aggregated_Groups/Final_Aggregated_GroupIDs_UTM42N.csv', header = True, inferSchema=True)\ntower_to_district_map = tower_to_district_map.select(F.col('Final_Agg_GroupID').alias(\"tower_group_id\"), F.col('distid').alias(\"district_id\"))\ntower_to_district_map.show(5)\n",
"+--------------+-----------+\n|tower_group_id|district_id|\n+--------------+-----------+\n| 0| 2008|\n| 1| 2008|\n| 2| 2007|\n| 3| 2008|\n| 4| 2008|\n+--------------+-----------+\nonly showing top 5 rows\n\n"
],
[
"raw_data_with_districts = raw_data_with_tower_groups.join(tower_to_district_map, \n raw_data_with_tower_groups.tower_group_id == tower_to_district_map.tower_group_id, \n how = 'left').select(raw_data_with_tower_groups['*'], tower_to_district_map['district_id'])\n\nraw_data_with_districts = raw_data_with_districts.select(F.col('user_id'), \n F.col(\"date\"),\n F.col(\"district_id\").alias('location'))\nraw_data_with_districts.show(5)\n\n",
"+----------------+--------+--------+\n| user_id| date|location|\n+----------------+--------+--------+\n|7orJ23R7GEYKqV1b|20141004| 1701|\n|9zgKqvx9LoxjQWve|20141004| 2001|\n|305xqe0kZ0RVlXGg|20141004| 2918|\n|y4rZqRp9JabjQDMK|20141004| 101|\n|edyL2yx10Vz0qjA8|20141004| 2001|\n+----------------+--------+--------+\nonly showing top 5 rows\n\n"
],
[
"## drop duplicates and sort data\nraw_data_with_districts_deduped = raw_data_with_districts.dropDuplicates()\nraw_data_with_districts_deduped_sorted = raw_data_with_districts_deduped.sort([\"user_id\", \"date\", \"location\"])\n\n# save in hive\nraw_data_with_districts_deduped_sorted.write.saveAsTable('afghanistan.user_daily_unique_districts_long')\n\nuser_daily_unique_districts_long = hive.sql('SELECT * FROM afghanistan.user_daily_unique_districts_long')",
"_____no_output_____"
],
[
"# convert long from to wide form dataset\n# one row per user\n# one column per day\n# value of each cell represent all towers user used on that day\n\n# note: collect_set dedups location values.\n# use collect_list if you need *all* location values for a day for a user\n\nuser_daily_unique_districts_wide = user_daily_unique_districts_long.groupby('user_id').pivot('date').agg(F.collect_set('location'))\nuser_daily_unique_districts_wide.show(100)\n",
"_____no_output_____"
],
[
"# a list of day series i.e from 20130101 to 20171231\ndaySeriesList = set()\n\nfor year in range(2013, 2018):\n for month in [1, 3, 5, 7, 8, 10, 12]:\n for day in range(1, 32):\n daySeriesList.add(str(year) + (\"%02d\"%month) + (\"%02d\"%day))\n \n for month in [4, 6, 9, 11]:\n for day in range(1, 31):\n daySeriesList.add(str(year) + (\"%02d\"%month) + (\"%02d\"%day))\n \n for day in range(1, 30):\n if(day == 29):\n if(year%4 == 0):\n daySeriesList.add(str(year) + \"02\" + (\"%02d\"%day))\n else:\n daySeriesList.add(str(year) + \"02\" + (\"%02d\"%day))\n",
"_____no_output_____"
],
[
"# add empty columns for days for which we did not have any users making any calls\n\n# existing list of columns in user_daily_unique_towers_wide table\nexistingDaySeriesColumns = user_daily_unique_districts_wide.columns\nexistingDaySeriesColumns.remove('user_id')\nexistingDaySeriesColumns = set(existingDaySeriesColumns)\n\nmissingColumns = daySeriesList.difference(existingDaySeriesColumns)\nprint('missing columns: ', missingColumns)\n\nfor newColumn in missingColumns:\n user_daily_unique_districts_wide = user_daily_unique_districts_wide.withColumn(str(newColumn), F.array())\n print('added column: ', newColumn)",
"_____no_output_____"
],
[
"# save in hive\nuser_daily_unique_districts_wide.write.saveAsTable('afghanistan.user_daily_unique_districts_wide')",
"_____no_output_____"
],
[
"spark.stop()",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a192f0012e03dee80a2f0a1b63d1e254465be81
| 44,958 |
ipynb
|
Jupyter Notebook
|
starter_code/model_1_binaryencode.ipynb
|
prakashbalasubramaniam/machine-learning-challenge
|
ed919187d9675729b04fce4857ba5888c9b32eab
|
[
"ADSL"
] | null | null | null |
starter_code/model_1_binaryencode.ipynb
|
prakashbalasubramaniam/machine-learning-challenge
|
ed919187d9675729b04fce4857ba5888c9b32eab
|
[
"ADSL"
] | null | null | null |
starter_code/model_1_binaryencode.ipynb
|
prakashbalasubramaniam/machine-learning-challenge
|
ed919187d9675729b04fce4857ba5888c9b32eab
|
[
"ADSL"
] | null | null | null | 35.014019 | 252 | 0.434183 |
[
[
[
"# Update sklearn to prevent version mismatches\n#!pip install sklearn --upgrade",
"_____no_output_____"
],
[
"# install joblib. This will be used to save your model. \n# Restart your kernel after installing \n#!pip install joblib",
"_____no_output_____"
],
[
"# Import library\nimport pandas as pd",
"_____no_output_____"
]
],
[
[
"# Read the CSV and Perform Basic Data Cleaning",
"_____no_output_____"
]
],
[
[
"# Read csv file in\ndf = pd.read_csv(\"exoplanet_data.csv\")\n# Drop the null columns where all values are null\ndf = df.dropna(axis='columns', how='all')\n# Drop the null rows\ndf = df.dropna()\ndf.head()",
"_____no_output_____"
]
],
[
[
"# Select your features (columns)",
"_____no_output_____"
]
],
[
[
"# Set features. This will also be used as your x values.\nselected_features = df[['koi_fpflag_nt','koi_fpflag_ss','koi_fpflag_co','koi_fpflag_ec','koi_period','koi_time0bk',\n 'koi_impact','koi_duration','koi_depth','koi_prad','koi_teq','koi_insol','koi_model_snr',\n 'koi_tce_plnt_num','koi_steff','koi_slogg','koi_srad','ra','dec','koi_kepmag']]\nfeature_names = selected_features.columns\nfeature_names",
"_____no_output_____"
]
],
[
[
"# Create a Train Test Split\n\nUse `koi_disposition` for the y values",
"_____no_output_____"
]
],
[
[
"# Set disposition for y\ntarget = df[\"koi_disposition\"]\ntarget_names = [\"CANDIDATE\", \"CONFIRMED\", \"FALSE POSITIVE\"]\n\n# Do Binary Encoding\ndata = target.copy()\ndata_binary_encoded = pd.get_dummies(data)\ndata_binary_encoded.head()",
"_____no_output_____"
],
[
"# Test split data, test data = 20% with stratify\nfrom sklearn.model_selection import train_test_split\n\ntest_size=0.40\nrandom_state=42\nstratify=data_binary_encoded\nX_train, X_test, y_train, y_test = train_test_split(selected_features, data_binary_encoded, \n test_size=test_size, random_state=random_state, stratify=stratify)\n#X_train, X_test, y_train, y_test = train_test_split(selected_features, target, test_size=0.20, random_state=42)",
"_____no_output_____"
],
[
"# Check 1st 5 entries of X_train\nX_train.head()",
"_____no_output_____"
],
[
"# Check 1st 5 entries of y_train\ny_train.head()",
"_____no_output_____"
]
],
[
[
"# Pre-processing\n\nScale the data using the MinMaxScaler and perform some feature selection",
"_____no_output_____"
]
],
[
[
"# Scale, fit and transform data\nfrom sklearn.preprocessing import MinMaxScaler\nscaler = MinMaxScaler()\nX_scaler = scaler.fit(X_train)\nX_train_scaled = X_scaler.transform(X_train)\nX_test_scaled = X_scaler.transform(X_test)",
"C:\\Users\\j6921\\Anaconda3\\lib\\site-packages\\sklearn\\preprocessing\\data.py:334: DataConversionWarning: Data with input dtype int64, float64 were all converted to float64 by MinMaxScaler.\n return self.partial_fit(X, y)\n"
],
[
"# Check 1st 5 entries of X_train_scaled\nX_train_scaled",
"_____no_output_____"
],
[
"# Check min max entries of X_train_scaled\nprint(f\"{X_train_scaled.max()} {X_train_scaled.min()}\")\nprint(f\"{y_train.max()} {y_train.min()}\") ",
"1.0 0.0\nCANDIDATE 1\nCONFIRMED 1\nFALSE POSITIVE 1\ndtype: uint8 CANDIDATE 0\nCONFIRMED 0\nFALSE POSITIVE 0\ndtype: uint8\n"
],
[
"# Check 1st 5 entries of X_test_scaled\nX_test_scaled",
"_____no_output_____"
]
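,
 [
 "# A minimal feature-selection sketch (assumption: univariate ANOVA scoring; illustrative only, not used downstream)\nfrom sklearn.feature_selection import SelectKBest, f_classif\n\n# f_classif expects a 1-D target, so use the original string labels aligned to the training rows\nselector = SelectKBest(score_func=f_classif, k=10)\nX_train_selected = selector.fit_transform(X_train_scaled, target.loc[X_train.index])\nprint(X_train_selected.shape)",
 "_____no_output_____"
 ]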
],
[
[
"# Train the Model\n\n",
"_____no_output_____"
]
],
[
[
"#from sklearn import tree RandomForest \nfrom sklearn.ensemble import RandomForestClassifier\nrf = RandomForestClassifier(n_estimators=200)\nrf = rf.fit(X_train_scaled, y_train)\na = rf.score(X_train_scaled, y_train)\nb = rf.score(X_test_scaled, y_test)\n\n# print scores for RandomForest\nprint(f\"{a}, {b}\")",
"1.0, 0.8780836610654272\n"
],
[
"# Display RandomForestClassifier\nrf",
"_____no_output_____"
],
[
"# Display features that influences the model most\nsorted(zip(rf.feature_importances_, feature_names), reverse=True)",
"_____no_output_____"
]
],
[
[
"# Hyperparameter Tuning\n\nUse `GridSearchCV` to tune the model's parameters",
"_____no_output_____"
]
],
[
[
"# Create the GridSearchCV model\nfrom sklearn.model_selection import GridSearchCV\nparam_grid = { \n 'n_estimators': [50, 100, 200, 400, 600],\n 'max_features': ['auto', 'sqrt', 'log2']\n}\ngrid = GridSearchCV(rf, param_grid, verbose=3)",
"_____no_output_____"
],
[
"# Train the model with GridSearch\ngrid.fit(X_train, y_train)",
"Fitting 3 folds for each of 15 candidates, totalling 45 fits\n[CV] max_features=auto, n_estimators=50 ..............................\n"
],
[
"# Display best tuned params and the score \nprint(grid.best_params_)\nprint(grid.best_score_)\n\n# Display and save best tuned param\nprint('Best max_features:', grid.best_params_['max_features'])\nprint('Best n_estimators:', grid.best_params_['n_estimators'])\nmax_features_tune = grid.best_params_['max_features']\nn_estimators_tune = grid.best_params_['n_estimators']",
"{'max_features': 'auto', 'n_estimators': 200}\n0.8907963757749165\nBest max_features: auto\nBest n_estimators: 200\n"
],
[
"# Refit model with new tuned values\nrf_tuned = RandomForestClassifier(n_estimators=n_estimators_tune, max_features=max_features_tune)\nrf_tuned = rf_tuned.fit(X_train_scaled, y_train)\na_tuned = rf_tuned.score(X_train_scaled, y_train)\nb_tuned = rf_tuned.score(X_test_scaled, y_test)\nprint(f\"{a_tuned}, {b_tuned}\")",
"1.0, 0.8816589202717197\n"
]
],
[
[
"# Save the Model",
"_____no_output_____"
]
],
[
[
"# save model to a file\nimport joblib\nfilename = 'prakash_randomforest.sav'\njoblib.dump(rf_tuned, filename)",
"_____no_output_____"
],
[
"print(f\"Random Forest Classifier Model Score Comparison\")\nprint(f\"-\"*95)\nprint(f\"Test Train Sample Size % = {test_size*100}, Random State = {random_state}, Stratify = Yes\")\nprint(f\"Model Train Score = {a}, Model Test Score = {b}\")\nprint(f\"Model Tuned Train Score = {a_tuned}, Model Tuned Test Score = {b_tuned}\")",
"Random Forest Classifier Model Score Comparison\n-----------------------------------------------------------------------------------------------\nTest Train Sample Size % = 40.0, Random State = 42, Stratify = Yes\nModel Train Score = 1.0, Model Test Score = 0.8780836610654272\nModel Tuned Train Score = 1.0, Model Tuned Test Score = 0.8816589202717197\n"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a1932e74fbdc943eccc513f721185efaf45609d
| 172,772 |
ipynb
|
Jupyter Notebook
|
intro-to-pytorch/Part 5 - Inference and Validation (Exercises)_aw.ipynb
|
anthony-wang/deep-learning-v2-pytorch
|
e4cd494cf9350688b9ee627968877f43271b1858
|
[
"MIT"
] | null | null | null |
intro-to-pytorch/Part 5 - Inference and Validation (Exercises)_aw.ipynb
|
anthony-wang/deep-learning-v2-pytorch
|
e4cd494cf9350688b9ee627968877f43271b1858
|
[
"MIT"
] | 4 |
2020-09-26T01:21:16.000Z
|
2022-02-10T02:10:54.000Z
|
intro-to-pytorch/Part 5 - Inference and Validation (Exercises)_aw.ipynb
|
anthony-wang/deep-learning-v2-pytorch
|
e4cd494cf9350688b9ee627968877f43271b1858
|
[
"MIT"
] | 1 |
2022-03-31T12:26:25.000Z
|
2022-03-31T12:26:25.000Z
| 189.859341 | 59,412 | 0.888454 |
[
[
[
"# Inference and Validation\n\nNow that you have a trained network, you can use it for making predictions. This is typically called **inference**, a term borrowed from statistics. However, neural networks have a tendency to perform *too well* on the training data and aren't able to generalize to data that hasn't been seen before. This is called **overfitting** and it impairs inference performance. To test for overfitting while training, we measure the performance on data not in the training set called the **validation** set. \n\nWe **avoid overfitting through regularization such as dropout** while monitoring the validation performance during training. In this notebook, I'll show you how to do this in PyTorch. \n\nAs usual, let's start by loading the dataset through torchvision. You'll learn more about torchvision and loading data in a later part. This time we'll be taking advantage of the test set which you can get by setting `train=False` here:\n\n```python\ntestset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform)\n```\n\nThe test set contains images just like the training set. Typically you'll see 10-20% of the original dataset held out for testing and validation with the rest being used for training.",
"_____no_output_____"
]
],
[
[
"import torch\nfrom torchvision import datasets, transforms\n\n# Define a transform to normalize the data\ntransform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,))])\n\n# Download and load the training data\ntrainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)\n\n# Download and load the test data\ntestset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)",
"_____no_output_____"
]
],
[
[
"Here I'll create a model like normal, using the same one from my solution for part 4.",
"_____no_output_____"
]
],
[
[
"from torch import nn, optim\nimport torch.nn.functional as F\n\nclass Classifier(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(784, 256)\n self.fc2 = nn.Linear(256, 128)\n self.fc3 = nn.Linear(128, 64)\n self.fc4 = nn.Linear(64, 10)\n \n def forward(self, x):\n # make sure input tensor is flattened\n x = x.view(x.shape[0], -1)\n \n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n x = F.log_softmax(self.fc4(x), dim=1)\n \n return x",
"_____no_output_____"
]
],
[
[
"The goal of validation is to measure the model's performance on data that isn't part of the training set. Performance here is up to the developer to define though. Typically this is just accuracy, the percentage of classes the network predicted correctly. Other options are [precision and recall](https://en.wikipedia.org/wiki/Precision_and_recall#Definition_(classification_context)) and top-5 error rate. We'll focus on accuracy here. First I'll do a forward pass with one batch from the test set.",
"_____no_output_____"
]
],
[
[
"model = Classifier()\n\nimages, labels = next(iter(testloader))\n# Get the class probabilities\nlogps = model(images)\nps = torch.exp(logps)\n\n# Make sure the shape is appropriate, we should get 10 class probabilities for 64 examples\nprint(ps.shape)",
"torch.Size([64, 10])\n"
]
],
[
[
"With the probabilities, we can get the most likely class using the `ps.topk` method. This returns the $k$ highest values. Since we just want the most likely class, we can use `ps.topk(1)`. This returns a tuple of the top-$k$ values and the top-$k$ indices. If the highest value is the fifth element, we'll get back 4 as the index.",
"_____no_output_____"
]
],
[
[
"top_p, top_class = ps.topk(1, dim=1)\n\n# Look at the most likely classes for the first 10 examples\nprint(top_class[:10,:])\nprint(top_p[:10,:])",
"tensor([[4],\n [4],\n [4],\n [4],\n [4],\n [5],\n [4],\n [5],\n [5],\n [5]])\ntensor([[0.1144],\n [0.1135],\n [0.1100],\n [0.1132],\n [0.1108],\n [0.1149],\n [0.1153],\n [0.1177],\n [0.1145],\n [0.1142]], grad_fn=<SliceBackward>)\n"
]
],
[
[
"Now we can check if the predicted classes match the labels. This is simple to do by equating `top_class` and `labels`, but we have to be careful of the shapes. Here `top_class` is a 2D tensor with shape `(64, 1)` while `labels` is 1D with shape `(64)`. To get the equality to work out the way we want, `top_class` and `labels` must have the same shape.\n\nIf we do\n\n```python\nequals = top_class == labels\n```\n\n`equals` will have shape `(64, 64)`, try it yourself. What it's doing is comparing the one element in each row of `top_class` with each element in `labels` which returns 64 True/False boolean values for each row.",
"_____no_output_____"
]
],
[
[
"equals = top_class == labels.view(*top_class.shape) # <-- why the asterisk?",
"_____no_output_____"
]
],
[
[
"Now we need to calculate the percentage of correct predictions. `equals` has binary values, either 0 or 1. This means that if we just sum up all the values and divide by the number of values, we get the percentage of correct predictions. This is the same operation as taking the mean, so we can get the accuracy with a call to `torch.mean`. If only it was that simple. If you try `torch.mean(equals)`, you'll get an error\n\n```\nRuntimeError: mean is not implemented for type torch.ByteTensor\n```\n\nThis happens because `equals` has type `torch.ByteTensor` but `torch.mean` isn't implemented for tensors with that type. So we'll need to convert `equals` to a float tensor. Note that when we take `torch.mean` it returns a scalar tensor, to get the actual value as a float we'll need to do `accuracy.item()`.",
"_____no_output_____"
]
],
[
[
"accuracy = torch.mean(equals.type(torch.FloatTensor))\nprint(f'Accuracy: {accuracy.item()*100}%')",
"Accuracy: 3.125%\n"
]
],
[
[
"The network is untrained so it's making random guesses and we should see an accuracy around 10%. Now let's train our network and include our validation pass so we can measure how well the network is performing on the test set. Since we're not updating our parameters in the validation pass, we can speed up our code by turning off gradients using `torch.no_grad()`:\n\n```python\n# turn off gradients\nwith torch.no_grad():\n # validation pass here\n for images, labels in testloader:\n ...\n```\n\n>**Exercise:** Implement the validation loop below and print out the total accuracy after the loop. You can largely copy and paste the code from above, but I suggest typing it in because writing it out yourself is essential for building the skill. In general you'll always learn more by typing it rather than copy-pasting. You should be able to get an accuracy above 80%.",
"_____no_output_____"
]
],
[
[
"model = Classifier()\ncriterion = nn.NLLLoss()\noptimizer = optim.Adam(model.parameters(), lr=0.003)\n\nepochs = 30\nsteps = 0\n\ntrain_losses, test_losses = [], []\nfor e in range(epochs):\n running_loss = 0\n print('Epoch', e)\n for images, labels in trainloader:\n \n optimizer.zero_grad()\n \n log_ps = model(images)\n loss = criterion(log_ps, labels)\n loss.backward()\n optimizer.step()\n \n running_loss += loss.item()\n \n else:\n ## TODO: Implement the validation pass and print out the validation accuracy\n # turn off gradients\n test_loss = 0\n accuracy = 0\n with torch.no_grad():\n # validation pass here\n for images, labels in testloader:\n log_ps = model.forward(images)\n ps = torch.exp(log_ps)\n \n loss = criterion(log_ps, labels)\n test_loss += loss\n \n top_p, top_class = ps.topk(1, dim=1)\n equals = top_class == labels.view(*top_class.shape)\n accuracy += torch.mean(equals.type(torch.FloatTensor))\n \n train_losses.append(running_loss/len(trainloader))\n test_losses.append(test_loss/len(testloader))\n \n print('Training loss: {:.4f}'.format(running_loss / len(trainloader)))\n print('Test loss: {:.4f}'.format(test_loss / len(testloader)))\n print('Test accuracy: {:.4f}'.format(accuracy / len(testloader)))",
"Epoch 0\nTraining loss: 0.5117\nTest loss: 0.4505\nTest accuracy: 0.8355\nEpoch 1\nTraining loss: 0.3895\nTest loss: 0.4200\nTest accuracy: 0.8531\nEpoch 2\nTraining loss: 0.3597\nTest loss: 0.3948\nTest accuracy: 0.8608\nEpoch 3\nTraining loss: 0.3339\nTest loss: 0.3677\nTest accuracy: 0.8709\nEpoch 4\nTraining loss: 0.3166\nTest loss: 0.3755\nTest accuracy: 0.8665\nEpoch 5\nTraining loss: 0.3013\nTest loss: 0.3779\nTest accuracy: 0.8652\nEpoch 6\nTraining loss: 0.2913\nTest loss: 0.3904\nTest accuracy: 0.8627\nEpoch 7\nTraining loss: 0.2827\nTest loss: 0.3579\nTest accuracy: 0.8750\nEpoch 8\nTraining loss: 0.2736\nTest loss: 0.3734\nTest accuracy: 0.8703\nEpoch 9\nTraining loss: 0.2720\nTest loss: 0.3985\nTest accuracy: 0.8653\nEpoch 10\nTraining loss: 0.2634\nTest loss: 0.3809\nTest accuracy: 0.8709\nEpoch 11\nTraining loss: 0.2558\nTest loss: 0.3701\nTest accuracy: 0.8718\nEpoch 12\nTraining loss: 0.2469\nTest loss: 0.3523\nTest accuracy: 0.8794\nEpoch 13\nTraining loss: 0.2425\nTest loss: 0.3696\nTest accuracy: 0.8808\nEpoch 14\nTraining loss: 0.2365\nTest loss: 0.4051\nTest accuracy: 0.8705\nEpoch 15\nTraining loss: 0.2307\nTest loss: 0.3918\nTest accuracy: 0.8792\nEpoch 16\nTraining loss: 0.2300\nTest loss: 0.3914\nTest accuracy: 0.8845\nEpoch 17\nTraining loss: 0.2239\nTest loss: 0.4122\nTest accuracy: 0.8757\nEpoch 18\nTraining loss: 0.2190\nTest loss: 0.3534\nTest accuracy: 0.8866\nEpoch 19\nTraining loss: 0.2155\nTest loss: 0.4188\nTest accuracy: 0.8798\nEpoch 20\nTraining loss: 0.2141\nTest loss: 0.3853\nTest accuracy: 0.8868\nEpoch 21\nTraining loss: 0.2067\nTest loss: 0.3969\nTest accuracy: 0.8789\nEpoch 22\nTraining loss: 0.2062\nTest loss: 0.4197\nTest accuracy: 0.8786\nEpoch 23\nTraining loss: 0.2048\nTest loss: 0.3851\nTest accuracy: 0.8819\nEpoch 24\nTraining loss: 0.1961\nTest loss: 0.3772\nTest accuracy: 0.8834\nEpoch 25\nTraining loss: 0.1947\nTest loss: 0.4449\nTest accuracy: 0.8700\nEpoch 26\nTraining loss: 0.1900\nTest loss: 0.3978\nTest accuracy: 0.8854\nEpoch 27\nTraining loss: 0.1894\nTest loss: 0.4271\nTest accuracy: 0.8797\nEpoch 28\nTraining loss: 0.1870\nTest loss: 0.4175\nTest accuracy: 0.8800\nEpoch 29\nTraining loss: 0.1887\nTest loss: 0.4104\nTest accuracy: 0.8846\n"
],
[
"%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"plt.plot(range(epochs), train_losses, label='Training loss')\nplt.plot(range(epochs), test_losses, label='Validation loss')\nplt.title('Overfitting in action (*without* dropout)')\nplt.xlabel('epochs')\nplt.ylabel('loss')\nplt.legend(loc='best')\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Overfitting\n\nIf we look at the training and validation losses as we train the network, we can see a phenomenon known as overfitting.\n\n<img src='assets/overfitting.png' width=450px>\n\nThe network learns the training set better and better, resulting in lower training losses. However, it starts having problems generalizing to data outside the training set leading to the validation loss increasing. The ultimate goal of any deep learning model is to make predictions on new data, so we should strive to get the lowest validation loss possible.\n\nOne option is to use the version of the model with the lowest validation loss, here the one around 8-10 training epochs. This strategy is called ***early-stopping***. In practice, you'd save the model frequently as you're training then later choose the model with the lowest validation loss.\n\nThe most common method to reduce overfitting (outside of early-stopping) is *dropout*, where we randomly drop input units. This forces the network to share information between weights, increasing its ability to generalize to new data. Adding dropout in PyTorch is straightforward using the [`nn.Dropout`](https://pytorch.org/docs/stable/nn.html#torch.nn.Dropout) module.\n\n```python\nclass Classifier(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(784, 256)\n self.fc2 = nn.Linear(256, 128)\n self.fc3 = nn.Linear(128, 64)\n self.fc4 = nn.Linear(64, 10)\n \n # Dropout module with 0.2 drop probability\n self.dropout = nn.Dropout(p=0.2)\n \n def forward(self, x):\n # make sure input tensor is flattened\n x = x.view(x.shape[0], -1)\n \n # Now with dropout\n x = self.dropout(F.relu(self.fc1(x)))\n x = self.dropout(F.relu(self.fc2(x)))\n x = self.dropout(F.relu(self.fc3(x)))\n \n # output so no dropout here\n x = F.log_softmax(self.fc4(x), dim=1)\n \n return x\n```\n\n**During training we want to use dropout to prevent overfitting, but during inference we want to use the entire network.**\n\nSo, we need to **turn off dropout during validation, testing**, and whenever we're using the network to make **predictions**. To do this, you use `model.eval()`. This sets the model to evaluation mode where the dropout probability is 0. You can turn dropout back on by setting the model to train mode with `model.train()`. In general, the pattern for the validation loop will look like this, where you turn off gradients, set the model to evaluation mode, calculate the validation loss and metric, then set the model back to train mode.\n\n```python\n# turn off gradients\nwith torch.no_grad():\n \n # set model to evaluation mode\n model.eval()\n \n # validation pass here\n for images, labels in testloader:\n ...\n\n# set model back to train mode\nmodel.train()\n```",
"_____no_output_____"
],
[
"> **Exercise:** Add dropout to your model and train it on Fashion-MNIST again. See if you can get a lower validation loss or higher accuracy.",
"_____no_output_____"
]
],
[
[
"## TODO: Define your model with dropout added\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\ninput_dims = 28*28\nhidden_dims = [256, 128, 64]\noutput_dims = 10\n\n\nclass FMNISTNet(nn.Module):\n def __init__(self):\n super().__init__()\n self.input_dims = 28*28\n self.hidden_dims = [256, 128, 64]\n self.output_dims = 10\n \n self.relu = nn.ReLU()\n self.logsoftmax = nn.LogSoftmax(dim=1)\n self.dropout = nn.Dropout(p=0.2)\n \n self.fc1 = nn.Linear(input_dims, hidden_dims[0])\n self.fc2 = nn.Linear(hidden_dims[0], hidden_dims[1])\n self.fc3 = nn.Linear(hidden_dims[1], hidden_dims[2])\n self.fc4 = nn.Linear(hidden_dims[2], output_dims)\n \n def forward(self, x):\n x = x.view(x.shape[0], input_dims)\n \n x = self.fc1(x)\n x = self.relu(x)\n x = self.dropout(x)\n \n x = self.fc2(x)\n x = self.relu(x)\n x = self.dropout(x)\n \n x = self.fc3(x)\n x = self.relu(x)\n x = self.dropout(x)\n \n x = self.fc4(x)\n out = self.logsoftmax(x)\n \n return out",
"_____no_output_____"
],
[
"## TODO: Train your model **with dropout**, and monitor the training progress with the validation loss and accuracy\nfrom torch import optim\n\n\nmodel = FMNISTNet()\noptimizer = optim.Adam(model.parameters(), lr=0.003)\n#optimizer = optim.SGD(model.parameters(), lr=0.01)\ncriterion = nn.NLLLoss()\n\nepochs = 30\ntrain_losses, test_losses = [], []\n\nfor e in range(epochs):\n print('epoch', e)\n running_loss = 0\n \n for images, labels in trainloader:\n # Flatten MNIST images into a 784 long vector\n images = images.view(images.shape[0], input_dims)\n \n optimizer.zero_grad()\n\n logps = model.forward(images)\n loss = criterion(logps, labels)\n\n loss.backward()\n optimizer.step()\n running_loss += loss.item()\n \n else:\n test_loss = 0\n accuracy = 0\n with torch.no_grad():\n # validation pass here\n model.eval() # turn on eval mode for model\n for images, labels in testloader:\n log_ps = model.forward(images)\n ps = torch.exp(log_ps)\n\n loss = criterion(log_ps, labels)\n test_loss += loss\n\n top_p, top_class = ps.topk(1, dim=1)\n equals = top_class == labels.view(*top_class.shape)\n accuracy += torch.mean(equals.type(torch.FloatTensor))\n\n train_losses.append(running_loss/len(trainloader))\n test_losses.append(test_loss/len(testloader))\n\n print('Training loss: {:.4f}'.format(running_loss / len(trainloader)))\n print('Test loss: {:.4f}'.format(test_loss / len(testloader)))\n print('Test accuracy: {:.4f}'.format(accuracy / len(testloader)))\n \n model.train() # turn on train mode for model",
"epoch 0\nTraining loss: 0.6110\nTest loss: 0.4779\nTest accuracy: 0.8279\nepoch 1\nTraining loss: 0.4838\nTest loss: 0.4476\nTest accuracy: 0.8386\nepoch 2\nTraining loss: 0.4466\nTest loss: 0.4176\nTest accuracy: 0.8459\nepoch 3\nTraining loss: 0.4363\nTest loss: 0.4048\nTest accuracy: 0.8590\nepoch 4\nTraining loss: 0.4231\nTest loss: 0.4011\nTest accuracy: 0.8597\nepoch 5\nTraining loss: 0.4161\nTest loss: 0.4057\nTest accuracy: 0.8585\nepoch 6\nTraining loss: 0.4053\nTest loss: 0.4150\nTest accuracy: 0.8546\nepoch 7\nTraining loss: 0.3997\nTest loss: 0.3875\nTest accuracy: 0.8633\nepoch 8\nTraining loss: 0.3966\nTest loss: 0.4235\nTest accuracy: 0.8558\nepoch 9\nTraining loss: 0.3927\nTest loss: 0.3915\nTest accuracy: 0.8620\nepoch 10\nTraining loss: 0.3880\nTest loss: 0.3938\nTest accuracy: 0.8662\nepoch 11\nTraining loss: 0.3782\nTest loss: 0.3782\nTest accuracy: 0.8618\nepoch 12\nTraining loss: 0.3744\nTest loss: 0.3710\nTest accuracy: 0.8721\nepoch 13\nTraining loss: 0.3772\nTest loss: 0.3831\nTest accuracy: 0.8631\nepoch 14\nTraining loss: 0.3685\nTest loss: 0.3876\nTest accuracy: 0.8609\nepoch 15\nTraining loss: 0.3731\nTest loss: 0.3778\nTest accuracy: 0.8641\nepoch 16\nTraining loss: 0.3664\nTest loss: 0.3950\nTest accuracy: 0.8693\nepoch 17\nTraining loss: 0.3681\nTest loss: 0.3897\nTest accuracy: 0.8683\nepoch 18\nTraining loss: 0.3653\nTest loss: 0.3854\nTest accuracy: 0.8638\nepoch 19\nTraining loss: 0.3564\nTest loss: 0.3860\nTest accuracy: 0.8686\nepoch 20\nTraining loss: 0.3584\nTest loss: 0.3949\nTest accuracy: 0.8616\nepoch 21\nTraining loss: 0.3562\nTest loss: 0.3851\nTest accuracy: 0.8667\nepoch 22\nTraining loss: 0.3601\nTest loss: 0.3763\nTest accuracy: 0.8685\nepoch 23\nTraining loss: 0.3495\nTest loss: 0.3869\nTest accuracy: 0.8692\nepoch 24\nTraining loss: 0.3594\nTest loss: 0.3707\nTest accuracy: 0.8703\nepoch 25\nTraining loss: 0.3552\nTest loss: 0.3617\nTest accuracy: 0.8728\nepoch 26\nTraining loss: 0.3459\nTest loss: 0.3831\nTest accuracy: 0.8738\nepoch 27\nTraining loss: 0.3460\nTest loss: 0.3668\nTest accuracy: 0.8738\nepoch 28\nTraining loss: 0.3377\nTest loss: 0.3706\nTest accuracy: 0.8717\nepoch 29\nTraining loss: 0.3520\nTest loss: 0.3711\nTest accuracy: 0.8644\n"
],
[
"plt.plot(range(epochs), train_losses, label='Training loss')\nplt.plot(range(epochs), test_losses, label='Validation loss')\nplt.title('(Less!) overfitting in action (*with* dropout)')\nplt.xlabel('epochs')\nplt.ylabel('loss')\nplt.legend(loc='best')\nplt.show()",
"_____no_output_____"
]
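,
 [
 "# A minimal early-stopping sketch (assumption: we only report the best epoch here; checkpointing is commented out)\nbest_epoch = min(range(len(test_losses)), key=lambda i: test_losses[i])\nprint('Lowest validation loss {:.4f} at epoch {}'.format(float(test_losses[best_epoch]), best_epoch))\n# inside the training loop you would save a checkpoint whenever the validation loss improves, e.g.\n# torch.save(model.state_dict(), 'best_model.pth')",
 "_____no_output_____"
 ]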
],
[
[
"## Inference\n\nNow that the model is trained, we can use it for inference. We've done this before, but now we need to remember to set the model in inference mode with `model.eval()`. You'll also want to turn off autograd with the `torch.no_grad()` context.",
"_____no_output_____"
]
],
[
[
"# Import helper module (should be in the repo)\nimport helper\n\n# Turn eval mode on for model. Test out your network!\nmodel.eval()\n\ndataiter = iter(testloader)\nimages, labels = dataiter.next()\nimg = images[0]\n# Convert 2D image to 1D vector\nimg = img.view(1, 784)\n\n# Calculate the class probabilities (softmax) for img\n#with torch.no_grad():\n# for images, labels in testloader:\n# log_ps = model.forward(images)\n# ps = torch.exp(log_ps)\n\nlog_ps = model.forward(img)\nps = torch.exp(log_ps)\n\n# Plot the image and probabilities\nhelper.view_classify(img.view(1, 28, 28), ps, version='Fashion')\n\n# Turn training mode back on for model.\nmodel.train()",
"_____no_output_____"
]
],
[
[
"## Next Up!\n\nIn the next part, I'll show you how to save your trained models. In general, you won't want to train a model everytime you need it. Instead, you'll train once, save it, then load the model when you want to train more or use if for inference.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a19355f6a0dfefc30cf572c5333ac922721a6d3
| 2,322 |
ipynb
|
Jupyter Notebook
|
Day-1/Activities/02-Stu_IceCreamStore/Unsolved/.ipynb_checkpoints/IceCreamConnector-checkpoint.ipynb
|
racheltrindle/sqlalchemy-challenge
|
0315f13ab66b61589969fd06306e11a90e6c88bb
|
[
"ADSL"
] | null | null | null |
Day-1/Activities/02-Stu_IceCreamStore/Unsolved/.ipynb_checkpoints/IceCreamConnector-checkpoint.ipynb
|
racheltrindle/sqlalchemy-challenge
|
0315f13ab66b61589969fd06306e11a90e6c88bb
|
[
"ADSL"
] | null | null | null |
Day-1/Activities/02-Stu_IceCreamStore/Unsolved/.ipynb_checkpoints/IceCreamConnector-checkpoint.ipynb
|
racheltrindle/sqlalchemy-challenge
|
0315f13ab66b61589969fd06306e11a90e6c88bb
|
[
"ADSL"
] | null | null | null | 20.368421 | 94 | 0.544789 |
[
[
[
"### Instructions\n* Use the database path to create a sqlite engine\n* Use the engine to select all of the rows and columns from the table `icecreamstore`\n* Create a new query that finds the ice cream flavors that cost $1.25 or greater",
"_____no_output_____"
],
[
"# SQL Alchemy\nfrom sqlalchemy import create_engine\n\ndatabase_path = \"../Resources/icecreamstore.sqlite\"",
"_____no_output_____"
],
[
"# Create an engine to connect to the database\ndata = create_engine",
"_____no_output_____"
],
[
"# Query All Records in the the Database\n",
"_____no_output_____"
],
[
"# Query Single Record in the the Database\n",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a19437671170b7e622f119108d4a6f9269902b5
| 22,361 |
ipynb
|
Jupyter Notebook
|
Write csv file in pandas 2.ipynb
|
akshhpatil/pandas-tutorial
|
d1e8449334ce870dd922c93bf76993efe1c63923
|
[
"MIT"
] | 1 |
2021-04-22T15:23:14.000Z
|
2021-04-22T15:23:14.000Z
|
Write csv file in pandas 2.ipynb
|
akshhpatil/pandas-tutorial
|
d1e8449334ce870dd922c93bf76993efe1c63923
|
[
"MIT"
] | null | null | null |
Write csv file in pandas 2.ipynb
|
akshhpatil/pandas-tutorial
|
d1e8449334ce870dd922c93bf76993efe1c63923
|
[
"MIT"
] | null | null | null | 26.525504 | 136 | 0.319708 |
[
[
[
"# Write csv file in pandas",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"data =pd.read_csv('C:\\\\Users\\\\admin\\\\Desktop\\\\book1.csv')\ndata",
"_____no_output_____"
],
[
"data.head()",
"_____no_output_____"
],
[
"data.head(2)",
"_____no_output_____"
],
[
"data.tail()",
"_____no_output_____"
],
[
"data =pd.read_csv('C:\\\\Users\\\\admin\\\\Desktop\\\\book1.csv',dtype = {'age':'float64'}) #change datatype value int to float ",
"_____no_output_____"
],
[
"data",
"_____no_output_____"
],
[
"data =pd.read_csv('C:\\\\Users\\\\admin\\\\Desktop\\\\book1.csv')\ndata",
"_____no_output_____"
],
[
"data =pd.read_csv('C:\\\\Users\\\\admin\\\\Desktop\\\\book1.csv' , true_values = [\"yes\"])\ndata",
"_____no_output_____"
]
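,
 [
 "# A minimal write example (assumption: the output path simply mirrors the read paths above)\ndata.to_csv('C:\\\\Users\\\\admin\\\\Desktop\\\\book1_out.csv', index=False)  # index=False drops the row-index column",
 "_____no_output_____"
 ]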
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a194484ad70feaec03b963ba9bf0c270ceabd43
| 15,591 |
ipynb
|
Jupyter Notebook
|
stats for data analysis/week3/test3.ipynb
|
etomoscow/mipt-yandex-ml-course
|
dbaa1cce5a5d1f38b547473f303ccac3a6232947
|
[
"MIT"
] | 1 |
2021-11-02T15:16:20.000Z
|
2021-11-02T15:16:20.000Z
|
stats for data analysis/week3/test3.ipynb
|
etomoscow/mipt-yandex-ml-course
|
dbaa1cce5a5d1f38b547473f303ccac3a6232947
|
[
"MIT"
] | null | null | null |
stats for data analysis/week3/test3.ipynb
|
etomoscow/mipt-yandex-ml-course
|
dbaa1cce5a5d1f38b547473f303ccac3a6232947
|
[
"MIT"
] | null | null | null | 27.545936 | 277 | 0.426336 |
[
[
[
"import pandas as pd\nimport numpy as np\n\nfrom itertools import combinations\nfrom scipy.stats import pearsonr\nfrom scipy import stats\nfrom statsmodels.sandbox.stats.multicomp import multipletests ",
"_____no_output_____"
],
[
"df = pd.read_csv('AUCs.txt', sep = '\\t')",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
]
],
[
[
"Классификатор C4.5 и три его модификации: с оптимизацией гиперпараметра m, гиперпараметра cf и с одновременной оптимизацией обоих гиперпараметров. Эти четыре классификатора сравнивались на 14 наборах данных. На каждом датасете был посчитан AUC каждого классификатора. \n\nИспользуя критерий знаковых рангов, проведите попарное сравнение каждого классификатора с каждым. Выберите два классификатора, различие между которыми наиболее статистически значимо.\n\n",
"_____no_output_____"
]
],
[
[
"ddf = pd.DataFrame(columns = ['Classifier 1', 'Classifier 2', 'Wilx', 'p-value'])\nn = 0 \nfor i,j in combinations(np.arange(1,5), 2):\n ddf.loc[n, 'Classifier 1'], ddf.loc[n, 'Classifier 2'] = df.columns[i], df.columns[j]\n ddf.loc[n, 'Wilx'] = stats.wilcoxon(df.iloc[:,i], df.iloc[:,j])[0]\n ddf.loc[n, 'p-value'] = stats.wilcoxon(df.iloc[:,i], df.iloc[:,j])[1]\n n += 1",
"_____no_output_____"
],
[
"ddf",
"_____no_output_____"
]
],
[
[
"Сколько статистически значимых на уровне 0.05 различий мы обнаружили?\n\n",
"_____no_output_____"
]
],
[
[
"ddf[ddf['p-value'] <= 0.05].shape",
"_____no_output_____"
]
],
[
[
"Судя по данным из предыдущего опроса, настройка какого из параметров классификатора даёт более значимое увеличение качества?\n\n",
"_____no_output_____"
]
],
[
[
"df.describe()",
"_____no_output_____"
]
],
[
[
"Сравнивая 4 классификатора между собой, мы проверили 6 гипотез. Давайте сделаем поправку на множественную проверку. Начнём с метода Холма. Сколько гипотез можно отвергнуть на уровне значимости 0.05 после поправки этим методом?\n\n",
"_____no_output_____"
]
],
[
[
"multipletests(ddf['p-value'], alpha=0.05,method = 'holm')",
"_____no_output_____"
]
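,
 [
 "# A small sketch: count the hypotheses rejected after the Holm correction\n# (multipletests returns the boolean reject mask as its first element)\nreject, p_corrected = multipletests(ddf['p-value'], alpha=0.05, method='holm')[:2]\nprint(reject.sum())",
 "_____no_output_____"
 ]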
],
[
[
"Сколько гипотез можно отвергнуть на уровне значимости 0.05 после поправки методом Бенджамини-Хохберга?\n\n",
"_____no_output_____"
]
],
[
[
"multipletests(ddf['p-value'], alpha=0.05,method = 'fdr_bh')",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a19496d0dd9e83e05437501b1ec709ffd13de9a
| 310,498 |
ipynb
|
Jupyter Notebook
|
Chicago Crime Rate Analysis.ipynb
|
Ina02/Prediction-of-Crime-Rates
|
3480e412e24a175466a6a78066a03cbb2f970be8
|
[
"MIT"
] | null | null | null |
Chicago Crime Rate Analysis.ipynb
|
Ina02/Prediction-of-Crime-Rates
|
3480e412e24a175466a6a78066a03cbb2f970be8
|
[
"MIT"
] | null | null | null |
Chicago Crime Rate Analysis.ipynb
|
Ina02/Prediction-of-Crime-Rates
|
3480e412e24a175466a6a78066a03cbb2f970be8
|
[
"MIT"
] | null | null | null | 62.23652 | 58,986 | 0.652326 |
[
[
[
"# Analysis and Prediction of Crimes in Chicago",
"_____no_output_____"
],
[
"## Overview",
"_____no_output_____"
],
[
"The goal of this project is to analyze the Chicago Crimes Dataset, classify the crimes and build a model that predicts the crime for 2017-2020.This project consists of three phases - Analyzing the dataset, Classifying the crimes, Building a prediction model.\n\nFor analysing the data, I have used the pandas package. The reason I chose pandas package is that it has built-in functionality for a lot of common data-processing applications: for example, easy groupby syntax, easy joins (which are really efficient in pandas), rolling windows, etc.\n\nFor Classifying the crimes and building a Prediction model, I have used LDA, linear regression and non-linear regression on the data and then analyzed the results to identify which algorithm works better.\n\n### Table of Contents\n* [Data](#Data)\n* [Data Analysis](#Data-Analysis)\n* [Classifying the Data](#Classifying-the-Data)\n* [Prediction Model](#Prediction-Model)\n * [Linear Regression Model](#Linear-Regression-Model)\n * [Linear Ridge Regression Model](#Linear-Ridge-Regression-Model)\n * [Neural Networks Model](#Neural-Networks-Model)\n* [Experimental Results](#Experimental-Results)\n* [Conclusion](#Conclusion)\n* [References](#References)\n",
"_____no_output_____"
],
[
"## Data",
"_____no_output_____"
],
[
"This dataset is taken from [Kaggle](https://www.kaggle.com/currie32/crimes-in-chicago) and reflects reported incidents of crime (with the exception of murders where data exists for each victim) that occurred in the City of Chicago from 2001 to January,2017. The data is in the following form:\n\n<table>\n<tr>\n<th> </th>\n<th>ID</th>\n<th>Case Number</th>\n<th>Date</th>\n<th>Block</th>\n<th>IUCR</th>\n<th>Primary Type</th>\n<th>Description</th>\n</tr>\n<tr>\n<td>388</td>\n<td>4785</td>\n<td>HP610824</td>\n<td>10/07/2008\n 12:39:00 PM</td>\n<td>000XX E 75TH ST</td>\n<td>0110</td>\n<td>HOMICIDE</td>\n<td>FIRST DEGREE MURDER</td>\n</tr>\n<tr>\n<td>835</td>\n<td>4786</td>\n<td>HP616595</td>\n<td>10/09/2008\n 03:30:00 AM</td>\n<td>048XX W POLK ST</td>\n<td>0110</td>\n<td>HOMICIDE</td>\n<td>FIRST DEGREE MURDER</td>\n</tr>\n<tr>\n<td>3</td>\n<td>10508693</td>\n<td>HZ250496</td>\n<td>05/03/2016\n 11:40:00 PM</td>\n<td>013XX S SAWYER AVE</td>\n<td>0486</td>\n<td>BATTERY</td>\n<td>DOMESTIC BATTERY SIMPLE</td>\n</tr>\n</table>\n\n\nThe dataset consists of 4 different files for crimes in 2001-2004, 2005-2007, 2008-2011,2011-2017 and has a size of 500MB. Since the data is really huge and has many attributes, it seems to be very interesting.",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nimport numpy as np\n%matplotlib inline\nimport pandas as pd",
"_____no_output_____"
]
],
[
[
"Let's first load the 4 files.",
"_____no_output_____"
]
],
[
[
"crimes1_4 = pd.read_csv('Chicago_Crimes_2001_to_2004.csv',sep=',', error_bad_lines=False, index_col=False, dtype='unicode')\ncrimes1_4.head(5)",
"b'Skipping line 1513591: expected 23 fields, saw 24\\n'\n"
],
[
"crimes5_7 = pd.read_csv('Chicago_Crimes_2005_to_2007.csv',sep=',', error_bad_lines=False, index_col=False, dtype='unicode')\ncrimes5_7.head(5)",
"b'Skipping line 533719: expected 23 fields, saw 24\\n'\n"
],
[
"crimes8_11 = pd.read_csv('Chicago_Crimes_2008_to_2011.csv',sep=',', error_bad_lines=False, index_col=False, dtype='unicode')\ncrimes8_11.head(5)",
"b'Skipping line 1149094: expected 23 fields, saw 41\\n'\n"
],
[
"crimes12_17 = pd.read_csv('Chicago_Crimes_2012_to_2017.csv',index_col='Date')\ncrimes12_17.head(5)",
"_____no_output_____"
]
],
[
[
"Now as we have loaded all the files, let's try to analyze the data.",
"_____no_output_____"
],
[
"# Data Analysis",
"_____no_output_____"
],
[
"Let's first analyze the crimes from 2001-2004.",
"_____no_output_____"
]
],
[
[
"pt1_4 = crimes1_4[['Primary Type']]\ncrime_counts1_4 = pd.DataFrame(pt1_4.groupby('Primary Type').size().sort_values(ascending=False).rename('counts').reset_index()).head(10)",
"_____no_output_____"
],
[
"crime_counts1_4",
"_____no_output_____"
]
],
[
[
"From the above table we can see the top 10 crimes that occured in 2001-2004.",
"_____no_output_____"
]
],
[
[
"loc1_4 = crimes1_4[['Location Description']]\nlocations1_4 = pd.DataFrame(loc1_4.groupby('Location Description').size().sort_values(ascending=False).rename('counts').reset_index()).head(10)",
"_____no_output_____"
],
[
"locations1_4",
"_____no_output_____"
]
],
[
[
"From this table we can say that maximum crimes have occured on the streets and residences.\n\nNow let's analyze the crimes from 2005 to 2007, 2008 to 2011 and 2012 to 2017 in the similar way.",
"_____no_output_____"
]
],
[
[
"pt5_7 = crimes5_7[['Primary Type']]\ncrime_counts5_7 = pd.DataFrame(pt5_7.groupby('Primary Type').size().sort_values(ascending=False).rename('counts').reset_index()).head(10)\ncrime_counts5_7",
"_____no_output_____"
],
[
"loc5_7 = crimes5_7[['Location Description']]\nlocations5_7 = pd.DataFrame(loc5_7.groupby('Location Description').size().sort_values(ascending=False).rename('counts').reset_index()).head(10)\nlocations5_7",
"_____no_output_____"
],
[
"pt8_11 = crimes8_11[['Primary Type']]\ncrime_counts8_11 = pd.DataFrame(pt5_7.groupby('Primary Type').size().sort_values(ascending=False).rename('counts').reset_index()).head(10)\ncrime_counts8_11",
"_____no_output_____"
],
[
"loc8_11 = crimes8_11[['Location Description']]\nlocations8_11 = pd.DataFrame(loc8_11.groupby('Location Description').size().sort_values(ascending=False).rename('counts').reset_index()).head(10)\nlocations8_11",
"_____no_output_____"
],
[
"pt12_17 = crimes12_17[['Primary Type']]\ncrime_counts12_17 = pd.DataFrame(pt12_17.groupby('Primary Type').size().sort_values(ascending=False).rename('counts').reset_index()).head(10)\ncrime_counts12_17",
"_____no_output_____"
],
[
"loc12_17 = crimes12_17[['Location Description']]\nlocations12_17 = pd.DataFrame(loc12_17.groupby('Location Description').size().sort_values(ascending=False).rename('counts').reset_index()).head(10)\nlocations12_17",
"_____no_output_____"
]
],
[
[
"Let us now compare the top crimes in these years.",
"_____no_output_____"
]
],
[
[
"import seaborn as sns\n\n# Initialize the matplotlib figure\nplt.figure(figsize=(20,20))\nf, ax = plt.subplots(2,2)\n\nsns.barplot(x=\"counts\", y=\"Primary Type\", data=crime_counts1_4,\n label=\"Total\", color=\"b\",ax=ax[0][0])\nax[0][0].set_title(\"2001-2004\")\n\nsns.barplot(x=\"counts\", y=\"Primary Type\", data=crime_counts5_7,\n label=\"Total\", color=\"b\",ax=ax[0][1])\nax[0][1].set_title(\"2005-2007\")\n\nsns.barplot(x=\"counts\", y=\"Primary Type\", data=crime_counts8_11,\n label=\"Total\", color=\"b\",ax=ax[1][0])\nax[1][0].set_title(\"2008-2011\")\n\nsns.barplot(x=\"counts\", y=\"Primary Type\", data=crime_counts12_17,\n label=\"Total\", color=\"b\",ax=ax[1][1])\nax[1][1].set_title(\"2012-2017\")\n\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"From the above plots we can see that Theft and Battery have been the top crimes in all these years and the number of Narcotics and Criminal Damage crimes are almost the same.\n\nLet's now compare the locations at which the crimes take place.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(30,30))\nf, ax = plt.subplots(2,2)\n\nsns.barplot(x=\"counts\", y=\"Location Description\", data=locations1_4,\n label=\"Total\", color=\"b\",ax=ax[0][0])\nax[0][0].set_title(\"2001-2004\")\n\nsns.barplot(x=\"counts\", y=\"Location Description\", data=locations5_7,\n label=\"Total\", color=\"b\",ax=ax[0][1])\nax[0][1].set_title(\"2005-2007\")\n\nsns.barplot(x=\"counts\", y=\"Location Description\", data=locations8_11,\n label=\"Total\", color=\"b\",ax=ax[1][0])\nax[1][0].set_title(\"2008-2011\")\n\nsns.barplot(x=\"counts\", y=\"Location Description\", data=locations12_17,\n label=\"Total\", color=\"b\",ax=ax[1][1])\nax[1][1].set_title(\"2012-2017\")\n\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"The plots obtained look similar to the plot of types of crimes. Street and Residence remain in the top position for crimes and Residence and Sidewalk are almost similar.\n\nLet's now compare the number of crimes with the number of arrests in these years.",
"_____no_output_____"
]
],
[
[
"crimesPerYear1_4 = pd.DataFrame(crimes1_4.groupby(['Year']).size().sort_values(ascending=False).rename('Count').reset_index())\ncrimesPerYear1_4 = crimesPerYear1_4.head(4)\ncrimesPerYear1_4",
"_____no_output_____"
],
[
"crimesPerYear5_7 = pd.DataFrame(crimes5_7.groupby(['Year']).size().sort_values(ascending=False).rename('Count').reset_index())\ncrimesPerYear5_7",
"_____no_output_____"
],
[
"crimesPerYear8_11 = pd.DataFrame(crimes8_11.groupby(['Year']).size().sort_values(ascending=False).rename('Count').reset_index())\ncrimesPerYear8_11",
"_____no_output_____"
],
[
"crimesPerYear12_17 = pd.DataFrame(crimes12_17.groupby(['Year']).size().sort_values(ascending=False).rename('Count').reset_index())\ncrimesPerYear12_17",
"_____no_output_____"
],
[
"frames = [crimesPerYear1_4, crimesPerYear5_7, crimesPerYear8_11,crimesPerYear12_17]\nresult = pd.concat(frames)\nresult",
"_____no_output_____"
]
],
[
[
"Lets now plot these values. We cannot directly plot the above obtained values since it is a DataFrame. So we first convert it to a numpy array and then plot the crime counts per year.",
"_____no_output_____"
]
],
[
[
"results = result.as_matrix(columns=[result.columns[:]])\nresults1 = results[:,:].astype(int)\nresults1 = results1[results1[:,0].argsort()]\nresults1",
"_____no_output_____"
],
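[
"# Side note (assumption: pandas >= 1.0, where DataFrame.as_matrix was removed).\n# A minimal sketch of the equivalent conversion with the supported API;\n# `results_alt` is an illustrative name, not part of the original analysis:\nresults_alt = result.to_numpy().astype(int)\nresults_alt = results_alt[results_alt[:, 0].argsort()]\nresults_alt[:3]",
"_____no_output_____"
],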
[
"y_pos = np.arange(len(results1[:,0]))\nplt.barh(y_pos, results1[:,1], align='center', alpha=0.5)\nplt.yticks(y_pos, results1[:,0])\nplt.xlabel('Crime counts')\nplt.title('Crime Counts over the years')\n \nplt.show()",
"_____no_output_____"
]
],
[
[
"From the above plot we can see that the crime rate has gradually decreased until 2004 and from 2005-2008, there were more crimes registered. From 2009, there is a decrease in the crime rate. \n\n2017 shows very less crime rate because the dataset consists of crimes registered till January, 2017 only.\n\nNow let us try to analyze the number of arrests over these years.",
"_____no_output_____"
]
],
[
[
"#arrest_yearly = crimes1_4[crimes1_4['Arrest'] == True]['Arrest']\ndf = pd.DataFrame(crimes1_4, columns = ['Arrest', 'Year'])\ndf1 = df[df.Arrest == \"True\"]\narrestsPerYear1_4 = pd.DataFrame(df1.groupby(['Year']).size().sort_values(ascending=False).rename('ArrestCount').reset_index()).head(4)\narrestsPerYear1_4",
"_____no_output_____"
],
[
"df = pd.DataFrame(crimes5_7, columns = ['Arrest', 'Year'])\ndf1 = df[df.Arrest == \"True\"]\narrestsPerYear5_7 = pd.DataFrame(df1.groupby(['Year']).size().sort_values(ascending=False).rename('ArrestCount').reset_index())\narrestsPerYear5_7",
"_____no_output_____"
],
[
"df = pd.DataFrame(crimes8_11, columns = ['Arrest', 'Year'])\ndf1 = df[df.Arrest == \"True\"]\narrestsPerYear8_11 = pd.DataFrame(df1.groupby(['Year']).size().sort_values(ascending=False).rename('ArrestCount').reset_index())\narrestsPerYear8_11",
"_____no_output_____"
],
[
"df = pd.DataFrame(crimes12_17, columns = ['Arrest', 'Year'])\ndf1 = df[df.Arrest == True]\ndf2 = df[df.Arrest == False]\narrestsPerYear12_17 = pd.DataFrame(df1.groupby(['Year']).size().sort_values(ascending=False).rename('ArrestCount').reset_index())\narrestsPerYear12_17",
"_____no_output_____"
]
],
[
[
"Let's now plot these arrest counts against the crime counts.",
"_____no_output_____"
]
],
[
[
"frames = [arrestsPerYear1_4, arrestsPerYear5_7, arrestsPerYear8_11,arrestsPerYear12_17]\nresult = pd.concat(frames)\nresults = result.as_matrix(columns=[result.columns[:]])\nresults2 = results[:,:].astype(int)\nresults2 = results2[results2[:,0].argsort()]\nresults2",
"_____no_output_____"
],
[
"ax = plt.subplot(111)\nw = 0.3\nax.bar(results2[:,0]-w, results1[:,1],width=w,color='b',align='center')\nax.bar(results2[:,0], results2[:,1],width=w,color='g',align='center')\nax.autoscale(tight=True)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"From the above plots we can see that the number of arrests recorded in each year is very less compared to the number of crimes.",
"_____no_output_____"
],
[
"## Classifying the Data",
"_____no_output_____"
],
[
"Let us consider the Crimes data from 2012-2017 for classifying the data. Before we perform any operations on the data, let's first modify the data so that we have the Date as index. This would help us later when we try to predict the crimes.",
"_____no_output_____"
]
],
[
[
"crimes = crimes12_17.iloc[:, 3: ]\ncrimes.head()",
"_____no_output_____"
],
[
"crimes.index = pd.to_datetime(crimes.index)\ncrimes.head(5)",
"_____no_output_____"
]
],
[
[
"Now since we have the Date as index, lets start the classification of data. Before we classify the types of crimes, let's see how many types of crimes are there.",
"_____no_output_____"
]
],
[
[
"crime_counts12_17 = pd.DataFrame(pt12_17.groupby('Primary Type').size().sort_values(ascending=False).rename('counts').reset_index())\ncrime_counts12_17",
"_____no_output_____"
]
],
[
[
"From the above table we can see that there are about 32 Primary Types of crimes. Also we can see that some of the crimes can be classified as similar types. For example - Theft, Robbery, Motor Vehicle Theft and Burglary can be given the same label. Similarly we can consider Battery, Sex Offence, Crim Sexual Assualt can be considered as similar crimes.\n\nNow lets try to classify the data by grouping similar items as below:\n\n<table>\n<tr>\n<th>Categories</th>\n<th>Label</th>\n<th>Class</th>\n</tr>\n<tr>\n<td>THEFT, BURGLARY, MOTOR VEHICLE THEFT, ROBBERY</td>\n<td>THEFT</td>\n<th>1</th>\n</tr>\n<tr>\n<td>BATTERY, CRIM SEXUAL ASSAULT, SEX OFFENSE</td>\n<td>SEXUAL ASSAULT</td>\n<td>2</td>\n</tr>\n<tr>\n<td>NARCOTICS, OTHER NARCOTIC VIOLATION</td>\n<td>NARCOTICS</td>\n<td>3</td>\n</tr>\n<tr>\n<td>ASSAULT, INTIMIDATION</td>\n<td>ASSAULT</td>\n<td>4</td>\n</tr>\n<tr>\n<td>OTHER OFFENSE</td>\n<td>OTHER OFFENSE</td>\n<td>5</td>\n</tr>\n<tr>\n<td>DECEPTIVE PRACTICE</td>\n<td>DECEPTIVE PRACTICE</td>\n<td>6</td>\n</tr>\n<tr>\n<td>CRIMINAL TRESPASS</td>\n<td>CRIMINAL TRESPASS</td>\n<td>7</td>\n</tr>\n<tr>\n<td>WEAPONS VIOLATION, CONCEALED CARRY LICENSE VIOLATION</td>\n<td>WEAPONS VIOLATION</td>\n<td>8</td>\n</tr>\n<tr>\n<td>PUBLIC INDECENCY, PUBLIC PEACE VIOLATION</td>\n<td>PUBLIC INDECENCY</td>\n<td>9</td>\n</tr>\n<tr>\n<td>OFFENSE INVOLVING CHILDREN</td>\n<td>OFFENSE INVOLVING CHILDREN</td>\n<td>10</td>\n</tr>\n<tr>\n<td>PROSTITUTION</td>\n<td>PROSTITUTION</td>\n<td>11</td>\n</tr>\n<tr>\n<td>INTERFERENCE WITH PUBLIC OFFICER</td>\n<td>INTERFERENCE WITH PUBLIC OFFICER</td>\n<td>12</td>\n</tr>\n<tr>\n<td>HOMICIDE</td>\n<td>HOMICIDE</td>\n<td>13</td>\n</tr>\n<tr>\n<td>ARSON, CRIMINAL DAMAGE</td>\n<td>ARSON</td>\n<td>14</td>\n</tr>\n<tr>\n<td>GAMBLING</td>\n<td>GAMBLING</td>\n<td>15</td>\n</tr>\n<tr>\n<td>LIQUOR LAW VIOLATION</td>\n<td>LIQUOR LAW VIOLATION</td>\n<td>16</td>\n</tr>\n<tr>\n<td>KIDNAPPING</td>\n<td>KIDNAPPING</td>\n<td>17</td>\n</tr>\n<tr>\n<td>STALKING, OBSCENITY</td>\n<td>STALKING</td>\n<td>18</td>\n</tr>\n<tr>\n<td>NON - CRIMINAL, NON-CRIMINAL (SUBJECT SPECIFIED)</td>\n<td>NON - CRIMINAL</td>\n<td>19</td>\n</tr>\n<tr>\n<td>HUMAN TRAFFICKING</td>\n<td>HUMAN TRAFFICKING</td>\n<td>20</td>\n</tr>\n</table>\n\nIn this way we reduce 32 categories to 20 categories.",
"_____no_output_____"
]
],
[
[
"classifiedCrimes = crimes12_17.replace(['THEFT', 'BURGLARY', 'MOTOR VEHICLE THEFT', 'ROBBERY' ,'BATTERY', 'CRIM SEXUAL ASSAULT',\n 'SEX OFFENSE' , 'NARCOTICS','OTHER NARCOTIC VIOLATION' , 'ASSAULT', 'INTIMIDATION' ,\n 'OTHER OFFENSE' , 'DECEPTIVE PRACTICE' , 'CRIMINAL TRESPASS' , 'WEAPONS VIOLATION' , \n 'CONCEALED CARRY LICENSE VIOLATION','PUBLIC INDECENCY', 'PUBLIC PEACE VIOLATION',\n 'OFFENSE INVOLVING CHILDREN','PROSTITUTION','INTERFERENCE WITH PUBLIC OFFICER','HOMICIDE',\n 'ARSON', 'CRIMINAL DAMAGE','GAMBLING','LIQUOR LAW VIOLATION','KIDNAPPING','STALKING', \n 'OBSCENITY','NON - CRIMINAL','NON-CRIMINAL', 'NON-CRIMINAL (SUBJECT SPECIFIED)','HUMAN TRAFFICKING']\n ,[1,1,1,1,2,2,2,3,3,4,4,5,6,7,8,8,9,9,10,11,12,13,14,14,15,16,17,18,18,19,19,19,20])\n",
"_____no_output_____"
],
[
"primaryTypes = classifiedCrimes[['Primary Type']]\nclassifiedCrimeCounts = pd.DataFrame(primaryTypes.groupby('Primary Type').size().sort_values(ascending=False).rename('counts').reset_index())\nclassifiedCrimeCounts",
"_____no_output_____"
]
],
[
[
"Now since we have all the class labels, let us crop the data by taking only the necessary fields.",
"_____no_output_____"
]
],
[
[
"classifiedCrimes = classifiedCrimes[['Primary Type','Latitude','Longitude','Year']]",
"_____no_output_____"
],
[
"classifiedCrimes.head(10)",
"_____no_output_____"
]
],
[
[
"Before we start building the prediction system, let's first remove all the missing values. Also in our prediction system, we would be predicting the crime based on the day of the week and the location. For this purpose we add another column to our data which corresponds to the day of the week. ",
"_____no_output_____"
]
],
[
[
"classifiedCrimes1 = classifiedCrimes.dropna(axis=0,how='any')",
"_____no_output_____"
],
[
"classifiedCrimes1.index = pd.to_datetime(classifiedCrimes1.index)",
"_____no_output_____"
],
[
"classifiedCrimes1.head(10)",
"_____no_output_____"
],
[
"classifiedCrimes1 = classifiedCrimes1.reset_index()\nclassifiedCrimes1['weekday'] = classifiedCrimes1['Date'].dt.dayofweek\nclassifiedCrimes1.head(10)",
"_____no_output_____"
]
],
[
[
"Now we have enough data to build the prediction system.",
"_____no_output_____"
],
[
"## Prediction Model",
"_____no_output_____"
],
[
"To predict the crimes in the future years, we first need to train a model. There are many methods for building a model but we require an accurate method. So let's experiment with these methods to find the best model.\n\n### Linear Regression Model\n\nLinear Regression is an approach for modeling the relationship between a scalar dependent variable Y and one or more explanatory variables (or independent variables) denoted by X.\n\n#### Method\n\nThe dataset is first loaded into a variable known as data and then we separate the Target values into T and the remaining data into X. Then we try to fit a linear model to all of the data to see how accurately we predict the residuary resistance for each sample. To do this we define three functions:\n1. model = train(X,T)\n2. predict = use(model,X)\n3. error = rmse(predict,T)\n\nIn our case, the Target values are the crimes(Primary Type) and X consists of the latitude, longitude and weekday.\n\n\n\n\nWe first use the train method to find the weights of each of the attribute. This method returns a model which consists of the set of keys < mean, standard deviation, weight>. \nTo find weight matrix, we use the formula : \n$$\n\\begin{align*}\n\\wv &= (\\Xv^T \\Xv)^{-1} \\Xv^T \\Tv\n\\end{align*}\n$$\n\nBut the weight matrix we obtain is not standardized. So we first standardize the values by using the following formula and then applying the above formula\n\n$$\n\\begin{align*}\n\\Xv &= \\frac{\\Xv - m }{s}\n\\end{align*}\n$$\n\nwhere, m is the mean ,\n s is the standard deviation.",
"_____no_output_____"
]
],
[
[
"import math\ndef train(X,T):\n means = np.mean(X,axis = 0)\n stds = np.std(X, axis = 0)\n Xs = (X - means) / stds\n Xs1 = np.hstack((np.ones((Xs.shape[0],1)),Xs))\n w = np.linalg.lstsq(np.dot(Xs1.T,Xs1), np.dot(Xs1.T, T))[0]\n return {'means':means, 'stds':stds, 'w':w}",
"_____no_output_____"
]
],
[
[
"Now we use this weight obtained from the above train() method in the use() method to get the predicted values of the model. In this method, we use the same mean and standard deviation as above.",
"_____no_output_____"
]
],
[
[
"def use(model, X):\n mean = model['means']\n std = model['stds']\n Xs = (X - mean) / std\n Xs1 = np.hstack((np.ones((Xs.shape[0],1)),Xs))\n new_w = model['w']\n predict = np.dot( Xs1,new_w )\n return predict",
"_____no_output_____"
]
],
[
[
"Now we take these predicted values and compare them with the Target values to find the error. We find the RMSE(Root Mean Square Error) for the Predicted values and the Target values using the following formula:\n\n$$\n\\begin{align*}\nrmse &= \\sqrt{\\frac{\\sum_{n=1}^N (Predict - T)^2}{N}}\n\\end{align*}\n$$",
"_____no_output_____"
]
],
[
[
"def rmse(predict, T):\n error = predict - T\n square = error ** 2\n mean = np.mean(square)\n root = np.sqrt(mean)\n return root",
"_____no_output_____"
]
],
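[
[
"# Quick end-to-end sanity check of train/use/rmse on tiny synthetic data.\n# All names here (X_demo, T_demo, m_demo) are illustrative only, not part of the analysis.\nX_demo = np.random.randn(100, 3)\nT_demo = X_demo @ np.array([[2.0], [-1.0], [0.5]]) + 0.01 * np.random.randn(100, 1)\nm_demo = train(X_demo, T_demo)\nrmse(use(m_demo, X_demo), T_demo)  # should come out close to the 0.01 noise level",
"_____no_output_____"
]
],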
[
[
"The RMSE value indicates the absolute fit of the model to the data – how close the observed data points are to the model’s predicted values.",
"_____no_output_____"
],
[
"#### Results",
"_____no_output_____"
],
[
"Let's first define the X and T matrices using classifiedCrimes1 dataFrame. Since this is a dataFrame, we first convert it to a numpy array so that we can slice it.\n\nThe matrix X should contain the Latitude, Longitude, Year and Weekday. T consists of the Primary Type(Crime)",
"_____no_output_____"
]
],
[
[
"CrimesData = classifiedCrimes1.as_matrix(columns=[classifiedCrimes1.columns[:]])",
"_____no_output_____"
],
[
"X = np.float64(CrimesData[:,2:6])\nX",
"_____no_output_____"
],
[
"T = np.float64(CrimesData[:,1:2])\nT",
"_____no_output_____"
]
],
[
[
"Let's now start training the model.",
"_____no_output_____"
]
],
[
[
"model = train(X, T)\npredict = use(model, X)\nerror = rmse(predict, T)",
"_____no_output_____"
],
[
"error",
"_____no_output_____"
]
],
[
[
"We can see that when we use linear regression to build our model, we got an error of 4.189\n\nLet us try some more approaches to see if we can build a better model for our prediction system.",
"_____no_output_____"
],
[
"### Linear Ridge Regression Model\n\nRidge regression generally yields better predictions than ordinary least squares solution, through a better compromise between bias and variance. It reduces the sum of squared errors.\n\nIf we add a term to our sum of squared error objective function that is the sum of all weight magnitudes except the bias weight. Then, we not only minimize the sum of squared errors, we also minimize the sum of the weight magnitudes:\n\n$$ \\sum_{i=1}^N (\\tv_i - \\xv_i^T \\wv)^2 + \\lambda \\sum_{i=2}^N w_i^2$$\n\nWith $\\lambda=0$ we have our usual linear regression objective function. With $\\lambda>0$, we are adding in a penalty for the weight magnitudes. So we get the equation for weight as\n\n$$ \\wv = (X^T X + \\lambda I)^{-1} X^T T $$\n\nIf we find the best value of $\\lambda$ by comparing error on the test data, it will give us an optimistic prediction of error on novel data, because the test data was used to pick the best $\\lambda$.\n\nInstead of comparing the error only on test data, we partition the data into multiple ($k$) subsets called \"folds\". We select one fold to be the test partition, another fold to be the validate partition, and collect the remaining folds to be the train partition. We can do this in $k\\,(k-1)$ ways. In order to divide the data into partitions, we use the **partitionKFolds** algorithm.\n\n#### Method",
"_____no_output_____"
]
],
[
[
" def partitionKFolds(X,T,nFolds,shuffle=False,nPartitions=3):\n # Randomly arrange row indices\n rowIndices = np.arange(X.shape[0])\n if shuffle:\n np.random.shuffle(rowIndices)\n # Calculate number of samples in each of the nFolds folds\n nSamples = X.shape[0]\n nEach = int(nSamples / nFolds)\n if nEach == 0:\n raise ValueError(\"partitionKFolds: Number of samples in each fold is 0.\")\n # Calculate the starting and stopping row index for each fold.\n # Store in startsStops as list of (start,stop) pairs\n starts = np.arange(0,nEach*nFolds,nEach)\n stops = starts + nEach\n stops[-1] = nSamples\n startsStops = list(zip(starts,stops))\n # Repeat with testFold taking each single fold, one at a time\n for testFold in range(nFolds):\n if nPartitions == 3:\n # Repeat with validateFold taking each single fold, except for the testFold\n for validateFold in range(nFolds):\n if testFold == validateFold:\n continue\n # trainFolds are all remaining folds, after selecting test and validate folds\n trainFolds = np.setdiff1d(range(nFolds), [testFold,validateFold])\n # Construct Xtrain and Ttrain by collecting rows for all trainFolds\n rows = []\n for tf in trainFolds:\n a,b = startsStops[tf] \n rows += rowIndices[a:b].tolist()\n Xtrain = X[rows,:]\n Ttrain = T[rows,:]\n # Construct Xvalidate and Tvalidate\n a,b = startsStops[validateFold]\n rows = rowIndices[a:b]\n Xvalidate = X[rows,:]\n Tvalidate = T[rows,:]\n # Construct Xtest and Ttest\n a,b = startsStops[testFold]\n rows = rowIndices[a:b]\n Xtest = X[rows,:]\n Ttest = T[rows,:]\n # Return partition matrices, then suspend until called again.\n yield Xtrain,Ttrain,Xvalidate,Tvalidate,Xtest,Ttest,testFold\n else:\n # trainFolds are all remaining folds, after selecting test and validate folds\n trainFolds = np.setdiff1d(range(nFolds), [testFold])\n # Construct Xtrain and Ttrain by collecting rows for all trainFolds\n rows = []\n for tf in trainFolds:\n a,b = startsStops[tf] \n rows += rowIndices[a:b].tolist()\n Xtrain = X[rows,:]\n Ttrain = T[rows,:]\n # Construct Xtest and Ttest\n a,b = startsStops[testFold]\n rows = rowIndices[a:b]\n Xtest = X[rows,:]\n Ttest = T[rows,:]\n # Return partition matrices, then suspend until called again.\n yield Xtrain,Ttrain,Xtest,Ttest,testFold",
"_____no_output_____"
]
],
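[
[
"# Minimal illustration of the generator on toy arrays (Xk/Tk are ad-hoc names):\n# with nFolds=4 and the default 3 partitions it yields 4*(4-1) = 12 splits.\nXk = np.arange(40).reshape(20, 2)\nTk = np.arange(20).reshape(20, 1)\nsplits = list(partitionKFolds(Xk, Tk, 4))\nlen(splits), splits[0][0].shape, splits[0][2].shape, splits[0][4].shape",
"_____no_output_____"
]
],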
[
[
"We define a new train method for ridge regression which consists of another parameter lambda. The use and rmse methods are same as we have used in linear regression.",
"_____no_output_____"
]
],
[
[
"def train(X,T,lamb):\n means = np.mean(X,axis = 0)\n stds = np.std(X, axis = 0)\n Xs = (X - means) / stds\n Xs1 = np.hstack((np.ones((Xs.shape[0],1)),Xs))\n w = np.linalg.lstsq(np.dot(Xs1.T,Xs1), np.dot(Xs1.T, T))[0]\n means = X.mean(0)\n stds = X.std(0)\n n,d = X.shape\n Xs1 = np.insert( (X - means)/stds, 0, 1, axis=1)\n lambDiag = np.eye(d+1) * lamb\n lambDiag[0,0] = 0\n w = np.linalg.lstsq( np.dot(Xs1.T,Xs1) + lambDiag, np.dot(Xs1.T,T))[0]\n return {'w': w, 'means':means, 'stds':stds}\n\ndef use(X,model):\n Xs1 = np.insert((X-model['means'])/model['stds'], 0, 1, axis=1)\n return np.dot(Xs1,model['w'])\n\ndef rmse(A,B):\n return np.sqrt(np.mean( (A-B)**2 ))",
"_____no_output_____"
]
],
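[
[
"# Illustrative sketch of how lambda shrinks the non-bias weights on synthetic data.\n# Xd, Td and lam are ad-hoc names, not part of the original analysis.\nXd = np.random.randn(50, 3)\nTd = Xd @ np.array([[1.0], [2.0], [3.0]]) + 0.1 * np.random.randn(50, 1)\nfor lam in [0, 10, 100]:\n    m = train(Xd, Td, lam)\n    print(lam, np.round(m['w'][1:].ravel(), 3))  # weights move toward 0 as lambda grows",
"_____no_output_____"
]
],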
[
[
"Now since we have all the required methods, lets start testing which lambda value gives the best results.\n\n#### Results",
"_____no_output_____"
],
[
"We need to determine which lamba value gives the bestresults for how many number of folds. To do this lets define a method which takes a set of lamba values and the number of folds as input and tells which one gives the best results.",
"_____no_output_____"
]
],
[
[
"def multipleLambdas(X, T, nFolds, lambdas):\n foldCount = 0\n results = []\n for Xtrain,Ttrain,Xval,Tval,Xtest,Ttest,_ in partitionKFolds(X,T,nFolds,True):\n for lamb in lambdas:\n model = train(Xtrain,Ttrain,lamb)\n predict = use(Xval,model)\n results.append([foldCount,lamb,rmse(use(Xtrain,model),Ttrain),rmse(use(Xval,model),Tval),rmse(use(Xtest,model),Ttest)])\n foldCount +=1\n results = np.array(results)\n bestresults = [] \n for i in range(foldCount):\n FCRow = np.take(results,(np.where(results[:,0:1] == i))[0], axis =0)\n minRow = np.where(results[:,3:4] == (np.amin(FCRow[:,3:4],axis=0)))[0]\n bestresults.append(np.take(results,minRow,axis=0))\n bestresults = np.array(bestresults)\n bestresults = bestresults.reshape(bestresults.shape[0], bestresults.shape[2])\n return bestresults",
"_____no_output_____"
]
],
[
[
"Let's now start experimenting with these methods.",
"_____no_output_____"
]
],
[
[
"lambdas = [0,1,2,3,4,5]\nbestresults = multipleLambdas(X,T,4,lambdas)\nbestresults",
"_____no_output_____"
]
],
[
[
"We can see that the least validation error obtained is 4.184 when we have 4 folds and when $\\lambda=0$ . This value is almost same as the error obtained when we used linear regression model. Let's try by increasing the number of folds and for a different set of lambda values.",
"_____no_output_____"
]
],
[
[
"lambdas = [0,5,10,15,20]\nbestresults = multipleLambdas(X,T,5,lambdas)\nbestresults",
"_____no_output_____"
]
],
[
[
"We can see that the least validation error in this case is 4.185 and is obtained for 5 folds with lambda value 20. Lets try to increase the lambda values and see if we get better results.",
"_____no_output_____"
]
],
[
[
"lambdas = [5,20,40,50,100]\nbestresults = multipleLambdas(X,T,5,lambdas)\nbestresults",
"_____no_output_____"
]
],
[
[
"The least validation error obtained in this case is 4.185 for $\\lambda=5$ which is almost the same as linear regression method.\n\nLet's try using the neural networks for building the model and see if yeilds better results.",
"_____no_output_____"
],
[
"### Neural Networks Model\n\nA neural network can be thought of as a network of “neurons” organised in layers. The predictors (or inputs) form the bottom layer, and the forecasts (or outputs) form the top layer.",
"_____no_output_____"
],
[
"#### Method\n\nI have use two methods - **trainNN** and **evaluateNN** for training and evaluating the neural network model. In the trainNN method, we first create an object nnet of the NeuralNetwork class and then we train it for different iterations. This method returns the nnet object. The evaluate method takes this nnet object and uses it to get the predicted model and then we compute the error.",
"_____no_output_____"
]
],
[
[
"from neuralnetworks import NeuralNetwork as nn",
"_____no_output_____"
],
[
"def trainNN(X,T, parameters):\n nnet = nn(X.shape[1], parameters[0], T.shape[1])\n nnet.train(X,T, nIterations=parameters[1], verbose=False)\n return {'net': nnet}",
"_____no_output_____"
],
[
"def evaluateNN(model,X,T):\n nnet = model['net']\n predict = nnet.use(X)\n return np.sqrt(np.mean( (predict-T)**2 ))",
"_____no_output_____"
]
],
[
[
"Similar to the above method, we define a **trainValidateTestKFolds** method which returns the following:\n\n1. the best parameter with number of hidden layers and iterations(hidden layers in case of neural networks)\n2. the best parameter values with the training error\n3. the mean of the validation error\n4. the testing error.",
"_____no_output_____"
]
],
[
[
"def trainValidateTestKFolds(X,T,parameterSets,nFolds,\n shuffle=False,verbose=False):\n # Randomly arrange row indices\n rowIndices = np.arange(X.shape[0])\n if shuffle:\n np.random.shuffle(rowIndices)\n # Calculate number of samples in each of the nFolds folds\n nSamples = X.shape[0]\n nEach = int(nSamples / nFolds)\n if nEach == 0:\n raise ValueError(\"partitionKFolds: Number of samples in each fold is 0.\")\n # Calculate the starting and stopping row index for each fold.\n # Store in startsStops as list of (start,stop) pairs\n starts = np.arange(0,nEach*nFolds,nEach)\n stops = starts + nEach\n stops[-1] = nSamples\n startsStops = list(zip(starts,stops))\n # Repeat with testFold taking each single fold, one at a time\n results = []\n # For each test fold\n for testFold in range(nFolds):\n #initializing bestMean to infinity\n bestMean = float(\"inf\")\n # For each set of parameter values, called parmSet\n for paramset in parameterSets:\n # Find best set of parameter values\n # For each validate fold (except when same as test fold)\n sum = 0\n for validateFold in range(nFolds):\n #Checking if it is same as test fold so \n if testFold == validateFold:\n continue\n #After selecting test and validate, the remaining are trainFolds\n #so we subtract the test and validate folds from other folds\n trainFolds = np.setdiff1d(range(nFolds), [testFold,validateFold])\n # Constructing Xtrain and Ttrain by collecting rows of all trainFolds\n rows = []\n for tf in trainFolds:\n i,j = startsStops[tf]\n rows += rowIndices[i:j].tolist()\n Xtrain = X[rows,:]\n Ttrain = T[rows,:]\n\n #Use trainf to fit model to training data using parmSet\n model = trainNN(Xtrain,Ttrain,paramset)\n\n # Constructing Xvalidate and Tvalidate\n i,j = startsStops[validateFold]\n rows = rowIndices[i:j]\n Xvalidate = X[rows,:]\n Tvalidate = T[rows,:]\n\n # Calculate the error of this model by calling evaluatef with \n # the model and validation data\n\n error = evaluateNN(model, Xvalidate, Tvalidate)\n\n sum = sum+error\n # Calculate the mean of these errors.\n currentMean = sum/(nFolds-1)\n # If this error is less than the previously best error for parmSet, \n # update best parameter values and best error\n if(bestMean > currentMean):\n #print(bestMean, currentMean)\n bestMean = currentMean\n bestLambda = paramset\n # Make a new set of training data by concatenating the training and \n # validation data from previous step.\n bestT = np.concatenate((Ttrain,Tvalidate),axis=0)\n bestX = np.concatenate((Xtrain,Xvalidate),axis=0)\n\n # Retrain, using trainf again, to fit a new model using the best set of parameter values\n # found above, to this new training data.\n newModel = trainNN(bestX,bestT,bestLambda)\n\n # Calculate error of this new model on the test data, and also on the new\n # training data.\n trainError = evaluateNN(newModel,bestX, bestT)\n\n # Construct Xtest and Ttest\n i,j = startsStops[testFold]\n rows = rowIndices[i:j]\n Xtest = X[rows,:]\n Ttest = T[rows,:]\n\n testError = evaluateNN(newModel,Xtest, Ttest)\n\n bestList = [bestLambda,bestMean,trainError,testError]\n\n if verbose:\n print(bestList)\n\n results.append(bestList)\n return results",
"_____no_output_____"
],
[
"import itertools\nparms = list(itertools.product([[5],[1,20],[10,10,100]], [10,50,75,200]))\nresult = trainValidateTestKFolds(X, T, parms, nFolds=5, shuffle=False)\nfor x in result:\n print('{:>30s} {:10.3f} {:10.3f} {:10.3f}'.format(str(x[0]), *x[1:]))",
" ([5], 10) 4.224 3.263 5.252\n([10, 10, 100], 75) 4.621 4.001 4.491\n([10, 10, 100], 10) 3.042 5.069 7.529\n ([1, 20], 75) 4.462 3.052 5.660\n ([1, 20], 50) 3.176 3.004 2.165\n"
]
],
[
[
"Let's try using different number of iterations and hidden layers",
"_____no_output_____"
]
],
[
[
"parms = list(itertools.product([[5],[1,3,20],[10,10,100,1]], [10,50,75,200]))\nresult = trainValidateTestKFolds(X, T, parms, nFolds=5, shuffle=False)\nfor x in result:\n print('{:>30s} {:10.3f} {:10.3f} {:10.3f}'.format(str(x[0]), *x[1:]))",
" ([1, 3, 20], 10) 3.294 7.234 4.262\n([10, 10, 100, 1], 10) 4.811 4.562 4.961\n ([1, 3, 20], 50) 3.112 5.069 5.332\n ([1, 3, 20], 200) 3.462 2.351 2.865\n ([1, 3, 20], 75) 5.116 5.104 2.165\n"
]
],
[
[
"We can see that neural network with hidden layers [1,3,20] and 200 iterations gives the best results. It gives an error of 2.31 which is lesser compared to the above two methods.",
"_____no_output_____"
],
[
"## Experimental Results\n\nFrom the above results we can see that the best model is obtained when we use Neural Networks with [1, 3, 20] hidden layers and for 200 iterations. Now as we have the model, we can use this model to predict the crimes in Chicago for the next years.\n\nWhenever we want to predict the crime(when the latitude, location, day of the week are given), we consider the given inputs as test data and use the model that was built previously to predict the output. Thus we get the predicted crime and also the probability that the data predicted is correct.",
"_____no_output_____"
],
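[
"# A hedged sketch of the prediction step described above; `best_model` and `newX`\n# are illustrative names, and the lines are commented out to avoid retraining here.\n# best_model = trainNN(X, T, ([1, 3, 20], 200))   # retrain with the best parameters found\n# newX = np.array([[41.88, -87.63, 2017, 2]])     # latitude, longitude, year, weekday\n# predicted_class = best_model['net'].use(newX)",
"_____no_output_____"
],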
[
"## Conclusion\n\nOn Analyzing the Chicago Crimes Data, we can see that maximum Crimes occur on Streets and Residence areas, and the types of Crimes that occur most frequently are Theft and Battery. Also we can see that the Maximum Crimes have occured in 2008. The number of arrests are very less compared to the number of crimes.\n\nBy using Neural Network with 200 iterations and [1, 3, 20] hidden units, we have built a model with the 2012-2017 crimes data which helps in predicting the type of crime given the latitude, longitude, day of the week and Year.\n\nThis approach can be further extended by considering the time at which the crime has occured. By doing so, we can yield better results.",
"_____no_output_____"
],
[
"## References\n\n* https://www.kaggle.com/currie32/crimes-in-chicago\n* https://www.kaggle.com/femiogunbode/eda-of-crime-in-chicago-from-2012-2016/discussion\n* https://www.kaggle.com/djonafegnem/chicago-crime-data-analysis\n* http://pandas.pydata.org/\n* http://pandas.pydata.org/pandas-docs/stable/tutorials.html\n\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a194e12a9867b08560f7982fa100bde1744c0dd
| 45,966 |
ipynb
|
Jupyter Notebook
|
notebooks/overlap_study.ipynb
|
mbatchkarov/ExpLosion
|
705039ec5f77c4203c98487f80d74b9d1f4fd501
|
[
"BSD-3-Clause"
] | 1 |
2015-10-21T08:53:55.000Z
|
2015-10-21T08:53:55.000Z
|
notebooks/overlap_study.ipynb
|
mbatchkarov/ExpLosion
|
705039ec5f77c4203c98487f80d74b9d1f4fd501
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/overlap_study.ipynb
|
mbatchkarov/ExpLosion
|
705039ec5f77c4203c98487f80d74b9d1f4fd501
|
[
"BSD-3-Clause"
] | null | null | null | 245.807487 | 42,118 | 0.922878 |
[
[
[
"# How many neighbours of an entry overlap lexically?\nProportion of neigh that overlap in the first 100 neighbours.",
"_____no_output_____"
]
],
[
[
"%cd ~/NetBeansProjects/ExpLosion/\nfrom notebooks.common_imports import *\nfrom gui.output_utils import *\nfrom gui.user_code import pretty_names\nfrom discoutils.thesaurus_loader import Vectors\nfrom random import sample",
"/Volumes/LocalDataHD/m/mm/mmb28/NetBeansProjects/ExpLosion\n"
],
[
"path = '../FeatureExtractionToolkit/word2vec_vectors/composed/AN_NN_word2vec-wiki_15percent-rep0_Add.events.filtered.strings'\nw = Vectors.from_tsv(path, allow_lexical_overlap=True)",
"_____no_output_____"
],
[
"w.init_sims(n_neighbors=100)",
"_____no_output_____"
],
[
"unigrams = list(x for x in w.keys() if x.count('_') < 1)\nphrases = list(x for x in w.keys() if x.count('_') >= 1)",
"_____no_output_____"
],
[
"w.get_nearest_neighbours_linear.cache_clear()\n%lprun -f Vectors.get_nearest_neighbours_linear w.get_nearest_neighbours_linear('car/N')",
"_____no_output_____"
],
[
"len(unigrams), len(phrases), len(w)",
"_____no_output_____"
],
[
"ratios = []\nfor entry in random.sample(phrases, 100):\n before = w.get_nearest_neighbours(entry)\n after = Vectors.remove_overlapping_neighbours(entry, before)\n ratios.append(len(after) / len(before))",
"_____no_output_____"
],
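[
"# Summary of the overlap ratios computed above (pure-Python, no extra imports assumed;\n# each ratio is kept/total neighbours for one sampled phrase):\nsum(ratios) / len(ratios), min(ratios), max(ratios)",
"_____no_output_____"
],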
[
"# plt.hist(ratios, bins=20);\nax = sns.distplot(np.array(ratios), bins=20, kde_kws=dict(cut=True))\nax.set_xlim(0, 1)",
"_____no_output_____"
],
[
"sns.kdeplot?",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a19521c9280e13edebdcc2c41a6c2d7d647e39d
| 8,244 |
ipynb
|
Jupyter Notebook
|
chapter3/Chapter3.ipynb
|
PacktPublishing/Hands-On-Artificial-Intelligence-for-Beginners
|
bcf7270dd3e01baf4870b88c65da2bcc581aaec8
|
[
"MIT"
] | 19 |
2018-05-11T20:00:56.000Z
|
2022-02-06T14:27:25.000Z
|
chapter3/Chapter3.ipynb
|
PacktPublishing/Hands-On-Artificial-Intelligence-for-Beginners
|
bcf7270dd3e01baf4870b88c65da2bcc581aaec8
|
[
"MIT"
] | 1 |
2018-12-18T23:08:24.000Z
|
2019-01-22T19:16:51.000Z
|
chapter3/Chapter3.ipynb
|
PacktPublishing/Hands-On-Artificial-Intelligence-for-Beginners
|
bcf7270dd3e01baf4870b88c65da2bcc581aaec8
|
[
"MIT"
] | 14 |
2018-07-18T17:56:14.000Z
|
2022-02-06T14:27:28.000Z
| 21.524804 | 611 | 0.5131 |
[
[
[
"# Chapter 3: Deep Learning Libraries",
"_____no_output_____"
],
[
"This chapter discusses the important libraries and frameworks that one needs to get started in artificial intelligence. We'll cover the basic functions of the three most popular deep learning frameworks: Tensorflow, Pytorch, and Keras, and show you how to get up and running in each of these frameworks as we will be utilizing them in the following chapters. We'll touch upon computing for Artificial Intelligence, and discuss how GPUs and other advanced memory units can improve AI. Lastly, we'll discuss the fundamentals of two popular cloud computing frameworks for deep learning, AWS and Google Cloud.",
"_____no_output_____"
]
],
[
[
"import numpy as np",
"_____no_output_____"
]
],
[
[
"## TensorFlow Basics",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf",
"_____no_output_____"
],
[
"## Define two constants \nx = tf.constant(2)\ny = tf.constant(2)\n\n## Multiply the constants\nproduct = tf.multiply(x, y)",
"_____no_output_____"
],
[
"init = tf.initialize_all_variables()\n\n## In Tensorflow, we must first initialize a session object\nsess = tf.Session()\nsess.run(init)\n\n## Run the session\nprint(sess.run(product)) \n\n## Close the session\nsess.close()",
"4\n"
]
],
[
[
"Creating a new graph",
"_____no_output_____"
]
],
[
[
"my_graph = tf.Graph()\n\nwith new_graph.as_default():\n x = tf.constant(2)\n y = tf.constant(2)",
"_____no_output_____"
]
],
[
[
"Scopes:",
"_____no_output_____"
]
],
[
[
"with tf.name_scope(\"my_scope\"):\n ## Define two constants \n const1 = tf.constant([4])\n const2 = tf.constant([5])\n\n ## Multiply the constants\n product = tf.multiply(const1, const2)",
"_____no_output_____"
]
],
[
[
"## Keras Basics",
"_____no_output_____"
],
[
"As Keras is designed as a model-level library, it does not contain methods for doing basic operations as PyTorch of base TensorFlow does. Instead, it utilizes TensorFlow as a backend. As such, its basic operations are the same as basic TensorFlow operations: ",
"_____no_output_____"
]
],
[
[
"import keras.backend as K",
"Using TensorFlow backend.\n"
],
[
"x = K.constant(5)\ny = K.constant(6)\nproduct = x * y",
"_____no_output_____"
]
],
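[
[
"## A minimal sketch: Keras backend tensors are symbolic, so to get the value we\n## can evaluate the node with K.eval (which runs the underlying TensorFlow session)\nK.eval(product)",
"_____no_output_____"
]
],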
[
[
"## PyTorch",
"_____no_output_____"
]
],
[
[
"import torch",
"_____no_output_____"
],
[
"x = torch.IntTensor([4])\ny = torch.IntTensor([5])\nproduct = x * y",
"_____no_output_____"
]
],
[
[
"It's easy to switch between numpy and pytorch",
"_____no_output_____"
]
],
[
[
"## Create a numpy array\nnumpy_array = np.random.randn(10,10)\n\n##Convert the numpy array to a pytorch tesnor\npytorch_tensor = torch.from_numpy(numpy_array)\n\n## Convert it back to Numpy\nnumpy_again = pytorch_tensor.numpy()",
"_____no_output_____"
]
],
[
[
"Pytorch tensors can be manipulated in a way that is similar to numpy",
"_____no_output_____"
]
],
[
[
"tensor = torch.FloatTensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n\n## print the third element of the 2nd row of the tensor\nprint(tensor[1][2])",
"tensor(6.)\n"
],
[
"## replace the second value of the first tensor\ntensor[0][1] = 1\nprint(tensor)",
"tensor([[1., 1., 3.],\n [4., 5., 6.]])\n"
]
],
[
[
"Like TensorFlow, PyTorch runs on the concept of variables, which are values that are intended to change and be updated during training processes",
"_____no_output_____"
]
],
[
[
"from torch.autograd import Variable\n\n## Create a tensor\ntensor_two = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n\n## Convert it to a variable\nvariable = Variable(tensor_two)",
"_____no_output_____"
],
[
"variable.data",
"_____no_output_____"
]
],
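[
[
"## Note (assumption: PyTorch >= 0.4, where Variable was merged into Tensor):\n## autograd can be enabled directly with requires_grad. A minimal sketch:\nt = torch.tensor([[1., 2., 3.]], requires_grad=True)\nt.sum().backward()\nt.grad  # gradient of the sum w.r.t. t is a tensor of ones",
"_____no_output_____"
]
],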
[
[
"## TensorFlow Logging",
"_____no_output_____"
]
],
[
[
"my_list = []\n## Iterate through the available GPUs\nfor device in ['/gpu:0', '/gpu:1']:\n ## Utilize the TensorFlow device manager\n with tf.device(device):\n x = tf.constant([1,2,3], shape=[1,3])\n y = tf.constant([1,2,3],shape [3,1]) \n my_list.append(tf.matmul(x, y))\n \n with tf.device('/cpu:0'):\n sum_operation = tf.add(x,y)\n \n ## Run everything through a session\n sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n sess.run(sum_operation)\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a1959530b2e0a69dd730c22ae905a8142c72553
| 60,411 |
ipynb
|
Jupyter Notebook
|
dataqsa/dataqsa-counting.ipynb
|
hurschler/pig-face-recognition
|
5834f3c89448a645ee0eaf2bbdade064f0c4be93
|
[
"Apache-2.0"
] | 1 |
2021-11-19T05:33:39.000Z
|
2021-11-19T05:33:39.000Z
|
dataqsa/dataqsa-counting.ipynb
|
hurschler/pig-face-recognition
|
5834f3c89448a645ee0eaf2bbdade064f0c4be93
|
[
"Apache-2.0"
] | null | null | null |
dataqsa/dataqsa-counting.ipynb
|
hurschler/pig-face-recognition
|
5834f3c89448a645ee0eaf2bbdade064f0c4be93
|
[
"Apache-2.0"
] | 1 |
2022-01-05T12:57:12.000Z
|
2022-01-05T12:57:12.000Z
| 80.01457 | 22,896 | 0.547433 |
[
[
[
"import util.preprocessing as preprocessing\nimport util.detection_util as detection_util\n\nimport sys\nsys.path.append('../')\nimport pandas as pd\nimport numpy as np\nimport cv2\nimport os\nimport xlrd\nimport openpyxl\nimport matplotlib.pyplot as plt\nimport util.config as config\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib.font_manager import FontProperties\nfont = {'family' : 'Arial',\n 'weight' : 'medium',\n 'size' : 12,\n 'style' : 'normal'}\nfont0 = {'family' : 'Arial',\n 'weight' : 'medium',\n 'size' : 12,\n 'style' : 'italic'}\n\nplt.rcParams['mathtext.fontset'] = 'custom'\nplt.rcParams['mathtext.it'] = 'Arial:italic'\nplt.rcParams['mathtext.rm'] = 'Arial'\nplt.rcParams['figure.dpi']= 300\nplt.rcParams['patch.antialiased'] = True\nplt.rcParams['lines.antialiased'] = True\nplt.rcParams['text.antialiased'] = True\nplt.rcParams['figure.figsize'] = [6, 3]\n\nplt.rc('font', **font)\nplt.rc('text', usetex=False)\n\nnp.warnings.filterwarnings('ignore', category=np.VisibleDeprecationWarning)\n",
"_____no_output_____"
],
[
"df = pd.read_excel(r'D:\\Users\\avatar\\PycharmProjects\\pig-face-recognition\\sample\\train.xlsx')\nprint(df.head().to_string())\ndf.head(5)",
" image_name pig_name age bright contrast createdate flash img_height img_width setversion sex sharpness type weight perspective full_pig_face over_exposed missing_element\n0 DSC_V1_6460_2238.JPG 6460 0 64.920095 1.0 2021:02:02 07:01:27 [7] 2848 4288 V1 NaN 31.764733 jpg 0 f 1 NaN NaN\n1 DSC_V1_6460_2239.JPG 6460 0 68.491263 1.0 2021:02:02 07:01:34 [7] 2848 4288 V1 NaN 47.600315 jpg 0 f 1 NaN NaN\n2 DSC_V1_6460_2240.JPG 6460 0 61.175416 1.0 2021:02:02 07:01:35 [7] 2848 4288 V1 NaN 58.129255 jpg 0 t 1 NaN NaN\n3 DSC_V1_6460_2241.JPG 6460 0 71.499618 1.0 2021:02:02 07:02:10 [7] 2848 4288 V1 NaN 25.567715 jpg 0 t 1 NaN NaN\n4 DSC_V1_6460_2242.JPG 6460 0 45.096860 1.0 2021:02:02 07:02:11 [7] 2848 4288 V1 NaN 9.974902 jpg 0 t 1 NaN NaN\n"
],
[
"df = df.astype({'image_name': 'str', 'perspective': 'str', 'full_pig_face': 'int'})\ndf['perspective'] = pd.Categorical(df.perspective)\ndf.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 931 entries, 0 to 930\nData columns (total 18 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 image_name 931 non-null object \n 1 pig_name 931 non-null int64 \n 2 age 931 non-null int64 \n 3 bright 931 non-null float64 \n 4 contrast 931 non-null float64 \n 5 createdate 931 non-null object \n 6 flash 931 non-null object \n 7 img_height 931 non-null int64 \n 8 img_width 931 non-null int64 \n 9 setversion 931 non-null object \n 10 sex 413 non-null object \n 11 sharpness 931 non-null float64 \n 12 type 931 non-null object \n 13 weight 931 non-null int64 \n 14 perspective 931 non-null category\n 15 full_pig_face 931 non-null int32 \n 16 over_exposed 69 non-null float64 \n 17 missing_element 87 non-null object \ndtypes: category(1), float64(4), int32(1), int64(5), object(7)\nmemory usage: 121.3+ KB\n"
],
[
"ax = df.perspective.value_counts().reindex([\"t\", \"f\", \"sr\", \"sl\"]).plot(kind=\"bar\", color='#607c8e')\nplt.show()\nprint (df.perspective.value_counts().reindex([\"t\", \"f\", \"sr\", \"sl\"]))",
"_____no_output_____"
],
[
"# Filter df, use only perspective front and top\n\ndf_f = df['perspective']=='f'\nprint(df_f.head().to_string())",
"0 True\n1 True\n2 False\n3 False\n4 False\n"
],
[
"df_q = df.query('(perspective == \"t\" or perspective == \"f\") & over_exposed != 1 & missing_element.isnull()')\ndf_q = df_q.query('bright >= 30')\ndf_q = df_q.query('sharpness >= 11')\ndf_q = df_q.sort_values('image_name')\n\n\nprint(df_q.head(15).to_string())\nindex = df_q.index\nprint (len(index))",
" image_name pig_name age bright contrast createdate flash img_height img_width setversion sex sharpness type weight perspective full_pig_face over_exposed missing_element\n0 DSC_V1_6460_2238.JPG 6460 0 64.920095 1.0 2021:02:02 07:01:27 [7] 2848 4288 V1 NaN 31.764733 jpg 0 f 1 NaN NaN\n1 DSC_V1_6460_2239.JPG 6460 0 68.491263 1.0 2021:02:02 07:01:34 [7] 2848 4288 V1 NaN 47.600315 jpg 0 f 1 NaN NaN\n2 DSC_V1_6460_2240.JPG 6460 0 61.175416 1.0 2021:02:02 07:01:35 [7] 2848 4288 V1 NaN 58.129255 jpg 0 t 1 NaN NaN\n3 DSC_V1_6460_2241.JPG 6460 0 71.499618 1.0 2021:02:02 07:02:10 [7] 2848 4288 V1 NaN 25.567715 jpg 0 t 1 NaN NaN\n6 DSC_V1_6460_2244.JPG 6460 0 57.334208 1.0 2021:02:02 07:02:14 [7] 2848 4288 V1 NaN 13.728085 jpg 0 f 0 NaN NaN\n8 DSC_V1_6460_2247.JPG 6460 0 39.071980 1.0 2021:02:02 07:02:27 [7] 2848 4288 V1 NaN 42.245624 jpg 0 f 1 NaN NaN\n11 DSC_V1_6471_2481.JPG 6471 0 54.296657 1.0 2021:02:02 07:42:24 [7] 2848 4288 V1 NaN 96.214984 jpg 0 t 0 NaN NaN\n12 DSC_V1_6471_2482.JPG 6471 0 54.321800 1.0 2021:02:02 07:42:45 [7] 2848 4288 V1 NaN 89.609565 jpg 0 t 0 NaN NaN\n17 DSC_V1_6472_2262.JPG 6472 0 51.484690 1.0 2021:02:02 07:05:19 [7] 2848 4288 V1 NaN 18.116662 jpg 0 t 0 NaN NaN\n18 DSC_V1_6472_2264.JPG 6472 0 75.321314 1.0 2021:02:02 07:06:16 [7] 2848 4288 V1 NaN 34.420141 jpg 0 t 1 NaN NaN\n21 DSC_V1_6472_2267.JPG 6472 0 45.584401 1.0 2021:02:02 07:06:25 [7] 2848 4288 V1 NaN 59.136640 jpg 0 t 1 NaN NaN\n22 DSC_V1_6472_2268.JPG 6472 0 50.907474 1.0 2021:02:02 07:06:26 [7] 2848 4288 V1 NaN 23.182936 jpg 0 f 0 NaN NaN\n23 DSC_V1_6472_2269.JPG 6472 0 56.426124 1.0 2021:02:02 07:06:36 [7] 2848 4288 V1 NaN 75.760866 jpg 0 t 0 NaN NaN\n24 DSC_V1_6472_2270.JPG 6472 0 35.998657 1.0 2021:02:02 07:06:36 [7] 2848 4288 V1 NaN 22.197272 jpg 0 t 0 NaN NaN\n26 DSC_V1_6476_2327.JPG 6476 0 63.804732 1.0 2021:02:02 07:14:06 [7] 2848 4288 V1 NaN 16.414235 jpg 0 f 0 NaN NaN\n443\n"
],
[
"# Group by pig_name\ndf_group_by_pig_name = df_q.groupby('pig_name').count()\ndf_group_by_pig_name.head()\n",
"_____no_output_____"
],
[
"df_group_by_pig_name = df_group_by_pig_name.query('(image_name >= 5)')\nprint (df_group_by_pig_name.to_string())\nindex = df_group_by_pig_name.index\nprint ('----------------------------------------------------------------------')\nprint (len(index))\n\n",
" image_name age bright contrast createdate flash img_height img_width setversion sex sharpness type weight perspective full_pig_face over_exposed missing_element\npig_name \n6358 6 6 6 6 6 6 6 6 6 0 6 6 6 6 6 0 0\n6385 6 6 6 6 6 6 6 6 6 0 6 6 6 6 6 0 0\n6390 9 9 9 9 9 9 9 9 9 0 9 9 9 9 9 0 0\n6408 5 5 5 5 5 5 5 5 5 0 5 5 5 5 5 0 0\n6418 5 5 5 5 5 5 5 5 5 0 5 5 5 5 5 0 0\n6422 5 5 5 5 5 5 5 5 5 0 5 5 5 5 5 0 0\n6432 5 5 5 5 5 5 5 5 5 0 5 5 5 5 5 0 0\n6444 5 5 5 5 5 5 5 5 5 0 5 5 5 5 5 0 0\n6446 6 6 6 6 6 6 6 6 6 0 6 6 6 6 6 0 0\n6460 6 6 6 6 6 6 6 6 6 0 6 6 6 6 6 0 0\n6472 6 6 6 6 6 6 6 6 6 0 6 6 6 6 6 0 0\n6476 6 6 6 6 6 6 6 6 6 0 6 6 6 6 6 0 0\n6489 7 7 7 7 7 7 7 7 7 0 7 7 7 7 7 0 0\n6495 7 7 7 7 7 7 7 7 7 0 7 7 7 7 7 0 0\n6497 5 5 5 5 5 5 5 5 5 0 5 5 5 5 5 0 0\n6503 5 5 5 5 5 5 5 5 5 0 5 5 5 5 5 0 0\n6507 7 7 7 7 7 7 7 7 7 0 7 7 7 7 7 0 0\n6512 8 8 8 8 8 8 8 8 8 0 8 8 8 8 8 0 0\n6516 5 5 5 5 5 5 5 5 5 0 5 5 5 5 5 0 0\n6518 5 5 5 5 5 5 5 5 5 0 5 5 5 5 5 0 0\n6520 8 8 8 8 8 8 8 8 8 0 8 8 8 8 8 0 0\n6522 7 7 7 7 7 7 7 7 7 0 7 7 7 7 7 0 0\n6523 6 6 6 6 6 6 6 6 6 0 6 6 6 6 6 0 0\n6524 5 5 5 5 5 5 5 5 5 0 5 5 5 5 5 0 0\n6525 7 7 7 7 7 7 7 7 7 0 7 7 7 7 7 0 0\n6527 6 6 6 6 6 6 6 6 6 0 6 6 6 6 6 0 0\n6529 5 5 5 5 5 5 5 5 5 0 5 5 5 5 5 0 0\n6531 5 5 5 5 5 5 5 5 5 0 5 5 5 5 5 0 0\n6551 5 5 5 5 5 5 5 5 5 0 5 5 5 5 5 0 0\n6552 8 8 8 8 8 8 8 8 8 0 8 8 8 8 8 0 0\n6555 5 5 5 5 5 5 5 5 5 0 5 5 5 5 5 0 0\n6557 5 5 5 5 5 5 5 5 5 0 5 5 5 5 5 0 0\n6558 6 6 6 6 6 6 6 6 6 0 6 6 6 6 6 0 0\n6560 7 7 7 7 7 7 7 7 7 0 7 7 7 7 7 0 0\n6566 6 6 6 6 6 6 6 6 6 0 6 6 6 6 6 0 0\n6574 7 7 7 7 7 7 7 7 7 0 7 7 7 7 7 0 0\n6575 8 8 8 8 8 8 8 8 8 0 8 8 8 8 8 0 0\n6578 8 8 8 8 8 8 8 8 8 0 8 8 8 8 8 0 0\n6589 5 5 5 5 5 5 5 5 5 0 5 5 5 5 5 0 0\n6950 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 0 0\n6952 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 0 0\n6953 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 0 0\n6954 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 0 0\n6966 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 0 0\n6972 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 0 0\n6975 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 0 0\n6978 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 0 0\n6986 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 0 0\n6992 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 0 0\n6994 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 0 0\n6997 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 0 0\n7006 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 0 0\n7007 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 0 0\n7014 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 0 0\n7023 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 0 0\n7029 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 0 0\n7031 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 0 0\n----------------------------------------------------------------------\n57\n"
],
[
"# Print the clean pig_names Index List \nprint (df_group_by_pig_name.index)\n",
"Int64Index([6358, 6385, 6390, 6408, 6418, 6422, 6432, 6444, 6446, 6460, 6472,\n 6476, 6489, 6495, 6497, 6503, 6507, 6512, 6516, 6518, 6520, 6522,\n 6523, 6524, 6525, 6527, 6529, 6531, 6551, 6552, 6555, 6557, 6558,\n 6560, 6566, 6574, 6575, 6578, 6589, 6950, 6952, 6953, 6954, 6966,\n 6972, 6975, 6978, 6986, 6992, 6994, 6997, 7006, 7007, 7014, 7023,\n 7029, 7031],\n dtype='int64', name='pig_name')\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a19686a35ef27ee55f783e009b597540ab7356b
| 118,411 |
ipynb
|
Jupyter Notebook
|
jupyter/TestMonitorServiceExample.ipynb
|
alexweav/systemlink-python-examples
|
afe9f349075bf274336d58876ecb0e3ce37572c8
|
[
"MIT"
] | 4 |
2019-07-09T16:53:39.000Z
|
2021-07-22T11:41:58.000Z
|
jupyter/TestMonitorServiceExample.ipynb
|
alexweav/systemlink-python-examples
|
afe9f349075bf274336d58876ecb0e3ce37572c8
|
[
"MIT"
] | null | null | null |
jupyter/TestMonitorServiceExample.ipynb
|
alexweav/systemlink-python-examples
|
afe9f349075bf274336d58876ecb0e3ce37572c8
|
[
"MIT"
] | null | null | null | 340.261494 | 62,364 | 0.919585 |
[
[
[
"<table>\n <tr>\n <td><img src='SystemLink_icon.png' /></td>\n <td ><h1><strong>NI SystemLink Python API</strong></h1></td>\n </tr>\n</table>\n\n## Test Monitor Service Example\n***\nThe Test Monitor Service API provides functions to create, update, delete and query Test results and Test steps.\n***\n# Prerequisites\n- The **NI SystemLink Server Test Module** needs to be installed in order to run this example\n- The **NI SystemLink Client** needs to be installed on a system which has TestStand installed and is registered to the SystemLink server. Configure the SystemLink TestStand plugin reporting to enable publishing test results.\n- Before you run this example, TestStand mock test results are needed:\n - From **TestStand** open the **'Computer Motherboard Test Sequence.seq'**:\n - Go to Help -> Find Examples and follow the instructions to open the Examples workspace (Examples.tsw)\n - From the Workspace tab, expand **Demos** and select **Computer Motherboard Test**. Open one of the sequence files, based on your language of choice\n - Run the sequence at least 10 times\n - Make sure you fail several tests, on different components\n\n# Summary \nThis notebook uses the Test Monitor Service API to import test and step results into Python. The data is used to do custom analytics.\n\n- Get all the test results that were created from the 'Computer Motherboard Test Sequence.seq' \n- Create a Pandas Dataframe with the information we want to process for each test\n- Plot pass vs. fail tests\n- Visualize test run vs. test duration\n- Pareto graph (step type)\n***",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nfrom systemlink.testmonclient import TestMonitorClient, testmon_messages\n\ntestmonclient = TestMonitorClient(service_name='TestMonitorClient')\n\n# Create pandas dataframe with the relevant test results information, to be used later\ndef get_dataframe_from_results(results):\n return pd.concat([pd.DataFrame({'status': result.status.status_name,\n 'startedAt': result.started_at,\n 'updatedAt': result.updated_at,\n 'programName': result.program_name,\n 'id': result.id,\n 'systemId': result.system_id,\n 'operator': result.operator,\n 'serialNumber': result.serial_number,\n 'totalTimeInSeconds': result.total_time_in_seconds,\n }, index=[idx]) for idx, result in enumerate(results)])\n\n# Only query test results that belong to the 'Computer Motherboard Test Sequence.seq' test program\nquery = testmon_messages.ResultQuery(None, None, None, ['Computer Motherboard Test Sequence.seq'], None, None, None, None, None, None, None, None, None)\n\nresults, _ = testmonclient.query_results(query)\ndf_results = get_dataframe_from_results(results)\n\n# Show the first elements of the dataframe, which holds the data we will use for further analysis\ndf_results[:2]",
"_____no_output_____"
]
],
[
[
"# Bar Plot of Test Results\nGroup the tests results by pass/fail. Create a bar plot to visualize the test runs by result.",
"_____no_output_____"
]
],
[
[
"# Visualize tests results (pass/fail)\n\nbar_width = 0.4\nopacity = 0.4\n\nres = df_results.groupby('status').count()\nfailed = res['id']['Failed']\npassed = res['id']['Passed']\n\nplt.style.use('fivethirtyeight')\n\nfig = plt.figure(figsize=(7, 7))\nplt.bar(1, passed, bar_width, alpha=opacity, color='b', label='Pass')\nplt.bar(1.5, failed, bar_width, alpha=opacity, color='r', label='Fail')\nplt.xticks([1, 1.5], ['Pass', 'Fail'], size='15')\nplt.ylabel('Runs', size='15')\nplt.title('Total Runs: ' + str(passed + failed), weight='bold', size='15')\nplt.show()",
"_____no_output_____"
]
],
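As a side note, pandas can draw an equivalent pass/fail bar chart in a single call; a sketch, with a hypothetical `status` series standing in for `df_results['status']`:

```python
import pandas as pd
import matplotlib.pyplot as plt

# Hypothetical stand-in for df_results['status'].
status = pd.Series(['Passed', 'Failed', 'Passed', 'Passed', 'Failed'])

# value_counts() counts each status; plot(kind='bar') draws one bar per status.
status.value_counts().plot(kind='bar', alpha=0.4)
plt.ylabel('Runs')
plt.show()
```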
[
[
"# Plot Test Run vs. Duration\nVisualize the test runs vs. duration, with red/green color indicating pass/fail.",
"_____no_output_____"
]
],
[
[
"# Visualize test failures vs duration\n\nresult_idx = np.arange(df_results.shape[0])\n\ndf_time = df_results[['totalTimeInSeconds', 'status']]\n\ncolor = ['r' if status == 'Failed' else 'g' for status in df_time['status']]\n\nfig = plt.figure(figsize=(10, 7))\nplt.scatter(result_idx, df_time['totalTimeInSeconds'], s=150, c=color, alpha='0.5')\nplt.title('Test Results - Duration', weight='bold', size='15')\nplt.xlabel('Test Runs', size='15')\nplt.ylabel('Time (seconds)', size='15')\nplt.show()",
"_____no_output_____"
]
],
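The same pass/fail duration comparison can also be read off numerically with a groupby; a sketch, again with a hypothetical stand-in for `df_results`:

```python
import pandas as pd

# Hypothetical stand-in for df_results.
df = pd.DataFrame({'status': ['Passed', 'Failed', 'Passed'],
                   'totalTimeInSeconds': [11.0, 16.5, 12.0]})

# Per-status duration statistics (count, mean, std, quartiles, ...).
print(df.groupby('status')['totalTimeInSeconds'].describe())
```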
[
[
"# Pareto distribution\nGet a Pandas Dataframe with all the step failures. Visualize the failures in a Pareto graph, which helps visualize the failure distribution, by step type.",
"_____no_output_____"
]
],
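Before the full implementation below, here is a minimal sketch of the cumulative-percentage computation that defines a Pareto chart, using hypothetical failure counts sorted in descending order:

```python
import numpy as np

# Hypothetical failure counts per step, sorted descending.
counts = np.array([12, 7, 3, 1])

cumulative = np.cumsum(counts)                      # running total of failures
cumulative_pct = cumulative / cumulative[-1] * 100  # cumulative share in percent
print(cumulative_pct)  # [ 52.17...  82.60...  95.65... 100. ]
```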
[
[
"# Pareto distribution of step failures visualization\n\n# Create pandas dataframe with the step results information that we want for further processing\ndef get_failed_steps_dataframe(steps):\n failed_steps = [step for step in steps if step.status.status_name == 'Failed' and step.step_type != 'SequenceCall']\n return pd.concat([pd.DataFrame({'name': step.name,\n 'id': step.step_id,\n 'totalTimeInSeconds': step.total_time_in_seconds,\n }, index=[idx]) for idx, step in enumerate(failed_steps)])\n\nresults_ids = [result.id for result in results]\nstep_query = testmon_messages.StepQuery(None, None, None, results_ids, None, None, None, None, None, None)\n\nsteps, _ = testmonclient.query_steps(step_query)\nsteps_df = get_failed_steps_dataframe(steps)\nres = steps_df.groupby('name').count()\nres = res.sort_values('id', ascending=False)\n\nfig, ax1 = plt.subplots()\nfig.set_size_inches(15, 7)\n\nplt.title('Failures by Test', weight='bold', size='15')\nplt.ylabel('Number of Runs', size='15')\nplt.xlabel('Test Type', size='15')\nax1.get_xaxis().set_ticks([])\n\n# Create the Pareto chart bars\nprevious_val = 0\ncumulative = []\nfor idx, row in res.iterrows():\n val = row['id']\n cumulative.append(val + previous_val)\n previous_val = val + previous_val\n ax1.bar(idx, val, bar_width, alpha=opacity, label=idx)\n\n# Add a legend\nlabels = list(steps_df['name'])\nplt.legend(labels, loc='upper right')\n\n# Cumulative line, in percentage\ncumulative_percentage = cumulative/cumulative[-1] * 100\n\nax2 = ax1.twinx()\nax2.set_ylim([0, 100])\nax2.plot(cumulative_percentage)\nplt.ylabel('Failure Percentage', size='15')\nplt.show()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a196cd5a62a377099a6a27bfe5539db09436384
| 767,961 |
ipynb
|
Jupyter Notebook
|
13_OverfittingAndRegularization.ipynb
|
pranav090992/basic_python_codes
|
376c17ecb8065c0cfbb17753c3312b0bbed91877
|
[
"MIT"
] | 20 |
2020-01-04T16:35:48.000Z
|
2022-03-29T20:47:43.000Z
|
13_OverfittingAndRegularization.ipynb
|
pranav090992/basic_python_codes
|
376c17ecb8065c0cfbb17753c3312b0bbed91877
|
[
"MIT"
] | null | null | null |
13_OverfittingAndRegularization.ipynb
|
pranav090992/basic_python_codes
|
376c17ecb8065c0cfbb17753c3312b0bbed91877
|
[
"MIT"
] | 25 |
2020-01-17T14:48:39.000Z
|
2022-01-26T08:29:08.000Z
| 117.803497 | 32,304 | 0.815242 |
[
[
[
"<a href=\"https://colab.research.google.com/github/satyajitghana/PadhAI-Course/blob/master/13_OverfittingAndRegularization.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score, mean_squared_error, log_loss\nfrom tqdm import tqdm_notebook \nimport seaborn as sns\n\nsns.set()\n\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.datasets import load_iris\n\nfrom numpy.linalg import norm",
"_____no_output_____"
],
[
"my_cmap = 'inferno'",
"_____no_output_____"
],
[
"np.random.seed(0)",
"_____no_output_____"
]
],
[
[
"## Generate data",
"_____no_output_____"
]
],
[
[
"iris=load_iris()\ndata = iris.data[:, :2] # take only the first two features\nlabels = iris.target",
"_____no_output_____"
],
[
"plt.scatter(data[:,0], data[:,1], c=labels, cmap=my_cmap)\nplt.show()",
"_____no_output_____"
],
[
"print(\"Data shape\",data.shape)\nprint(\"Labels shape\",labels.shape)",
"Data shape (150, 2)\nLabels shape (150,)\n"
]
],
[
[
"## Multi class classification",
"_____no_output_____"
]
],
[
[
"X_train, X_val, Y_train, Y_val = train_test_split(data, labels, stratify=labels, random_state=0,test_size=0.2)\nprint(X_train.shape, X_val.shape, labels.shape)",
"(120, 2) (30, 2) (150,)\n"
],
[
"enc = OneHotEncoder()\ny_OH_train = enc.fit_transform(np.expand_dims(Y_train,1)).toarray()\ny_OH_val = enc.fit_transform(np.expand_dims(Y_val,1)).toarray()\nprint(y_OH_train.shape, y_OH_val.shape)",
"(120, 3) (30, 3)\n"
]
],
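As a quick illustration of what the one-hot step above produces, a small self-contained sketch with made-up class labels:

```python
import numpy as np
from sklearn.preprocessing import OneHotEncoder

labels = np.array([[0], [2], [1]])  # class indices, shape (3, 1)
enc = OneHotEncoder()
print(enc.fit_transform(labels).toarray())
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]
```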
[
[
"## FF Class",
"_____no_output_____"
]
],
[
[
"class FFNetwork:\n \n def __init__(self, num_hidden=2, init_method = 'xavier', activation_function = 'sigmoid', leaky_slope = 0.1):\n \n self.params={}\n self.num_layers=2\n self.layer_sizes = [2, num_hidden, 3]\n self.activation_function = activation_function\n self.leaky_slope = leaky_slope\n np.random.seed(0)\n \n if init_method == \"random\":\n for i in range(1,self.num_layers+1):\n self.params[\"W\"+str(i)] = np.random.randn(self.layer_sizes[i-1],self.layer_sizes[i])\n self.params[\"B\"+str(i)] = np.random.randn(1,self.layer_sizes[i])\n \n elif init_method == \"he\":\n for i in range(1,self.num_layers+1):\n self.params[\"W\"+str(i)] = np.random.randn(self.layer_sizes[i-1],self.layer_sizes[i])*np.sqrt(2/self.layer_sizes[i-1])\n self.params[\"B\"+str(i)] = np.random.randn(1,self.layer_sizes[i])\n \n elif init_method == \"xavier\":\n for i in range(1,self.num_layers+1):\n self.params[\"W\"+str(i)]=np.random.randn(self.layer_sizes[i-1],self.layer_sizes[i])*np.sqrt(1/self.layer_sizes[i-1])\n self.params[\"B\"+str(i)]=np.random.randn(1,self.layer_sizes[i])\n \n self.gradients={}\n self.update_params={}\n self.prev_update_params={}\n for i in range(1,self.num_layers+1):\n self.update_params[\"v_w\"+str(i)]=0\n self.update_params[\"v_b\"+str(i)]=0\n self.update_params[\"m_b\"+str(i)]=0\n self.update_params[\"m_w\"+str(i)]=0\n self.prev_update_params[\"v_w\"+str(i)]=0\n self.prev_update_params[\"v_b\"+str(i)]=0\n \n def forward_activation(self, X): \n if self.activation_function == \"sigmoid\":\n return 1.0/(1.0 + np.exp(-X))\n elif self.activation_function == \"tanh\":\n return np.tanh(X)\n elif self.activation_function == \"relu\":\n return np.maximum(0,X)\n elif self.activation_function == \"leaky_relu\":\n return np.maximum(self.leaky_slope*X,X)\n \n def grad_activation(self, X):\n if self.activation_function == \"sigmoid\":\n return X*(1-X) \n elif self.activation_function == \"tanh\":\n return (1-np.square(X))\n elif self.activation_function == \"relu\":\n return 1.0*(X>0)\n elif self.activation_function == \"leaky_relu\":\n d=np.zeros_like(X)\n d[X<=0]=self.leaky_slope\n d[X>0]=1\n return d\n \n def get_accuracy(self): \n Y_pred_train = model.predict(X_train)\n Y_pred_train = np.argmax(Y_pred_train,1)\n Y_pred_val = model.predict(X_val)\n Y_pred_val = np.argmax(Y_pred_val,1)\n accuracy_train = accuracy_score(Y_pred_train, Y_train)\n accuracy_val = accuracy_score(Y_pred_val, Y_val)\n return accuracy_train,accuracy_val\n \n def softmax(self, X):\n exps = np.exp(X)\n return exps / np.sum(exps, axis=1).reshape(-1,1)\n \n def forward_pass(self, X, params = None):\n if params is None:\n params = self.params\n self.A1 = np.matmul(X, params[\"W1\"]) + params[\"B1\"] # (N, 2) * (2, 2) -> (N, 2)\n self.H1 = self.forward_activation(self.A1) # (N, 2)\n self.A2 = np.matmul(self.H1, params[\"W2\"]) + params[\"B2\"] # (N, 2) * (2, 2) -> (N, 2)\n self.H2 = self.softmax(self.A2) # (N, 2)\n return self.H2\n \n def grad(self, X, Y, params = None):\n if params is None:\n params = self.params \n \n self.forward_pass(X, params)\n m = X.shape[0]\n self.gradients[\"dA2\"] = self.H2 - Y # (N, 4) - (N, 4) -> (N, 4)\n self.gradients[\"dW2\"] = np.matmul(self.H1.T, self.gradients[\"dA2\"]) # (2, N) * (N, 4) -> (2, 4)\n self.gradients[\"dB2\"] = np.sum(self.gradients[\"dA2\"], axis=0).reshape(1, -1) # (N, 4) -> (1, 4)\n self.gradients[\"dH1\"] = np.matmul(self.gradients[\"dA2\"], params[\"W2\"].T) # (N, 4) * (4, 2) -> (N, 2)\n self.gradients[\"dA1\"] = np.multiply(self.gradients[\"dH1\"], 
self.grad_activation(self.H1)) # (N, 2) .* (N, 2) -> (N, 2)\n self.gradients[\"dW1\"] = np.matmul(X.T, self.gradients[\"dA1\"]) # (2, N) * (N, 2) -> (2, 2)\n self.gradients[\"dB1\"] = np.sum(self.gradients[\"dA1\"], axis=0).reshape(1, -1) # (N, 2) -> (1, 2)\n \n def fit(self, X, Y, epochs=1, algo= \"GD\",l2_norm=False, lambda_val=0.8, display_loss=False, eta=1):\n train_accuracies={}\n val_accuracies={}\n if display_loss:\n loss = []\n weight_mag = []\n for num_epoch in tqdm_notebook(range(epochs), total=epochs, unit=\"epoch\"):\n m = X.shape[0]\n \n self.grad(X, Y)\n for i in range(1,self.num_layers+1):\n if l2_norm:\n self.params[\"W\"+str(i)] -= (eta * lambda_val)/m * self.params[\"W\"+str(i)] + eta * (self.gradients[\"dW\"+str(i)]/m)\n else:\n self.params[\"W\"+str(i)] -= eta * (self.gradients[\"dW\"+str(i)]/m)\n self.params[\"B\"+str(i)] -= eta * (self.gradients[\"dB\"+str(i)]/m)\n \n train_accuracy,val_accuracy=self.get_accuracy()\n train_accuracies[num_epoch]=train_accuracy\n val_accuracies[num_epoch]=val_accuracy\n if display_loss:\n Y_pred = self.predict(X)\n loss.append(log_loss(np.argmax(Y, axis=1), Y_pred))\n weight_mag.append((norm(self.params[\"W1\"]) + norm(self.params[\"W2\"]) + norm(self.params[\"B1\"]) + norm(self.params[\"B2\"]))/18)\n \n plt.plot(list(train_accuracies.values()),label=\"Train accuracy\")\n plt.plot(list(val_accuracies.values()),label=\"Validation accuracy\")\n plt.plot(np.ones((epochs, 1))*0.9)\n plt.plot(np.ones((epochs, 1))*0.33)\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.legend()\n plt.show()\n \n if display_loss:\n fig, ax1 = plt.subplots()\n color = 'tab:red'\n ax1.set_xlabel('epochs')\n ax1.set_ylabel('Log Loss', color=color)\n ax1.plot(loss, '-o', color=color)\n ax1.tick_params(axis='y', labelcolor=color)\n ax2 = ax1.twinx() \n color = 'tab:blue'\n ax2.set_ylabel('Weight Magnitude', color=color) # we already handled the x-label with ax1\n ax2.plot(weight_mag, '-*', color=color)\n ax2.tick_params(axis='y', labelcolor=color)\n fig.tight_layout() \n plt.show()\n\n \n def predict(self, X):\n Y_pred = self.forward_pass(X)\n return np.array(Y_pred).squeeze() ",
"_____no_output_____"
],
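The `l2_norm=True` branch of `fit` shrinks each weight matrix by a factor of `(1 - eta*lambda_val/m)` before applying the usual gradient step. A standalone sketch of that update rule with made-up values:

```python
import numpy as np

eta, lambda_val, m = 0.1, 0.8, 120            # learning rate, L2 strength, sample count
W = np.array([[0.5, -0.3], [0.2, 0.7]])       # made-up weights
dW = np.array([[0.04, -0.02], [0.01, 0.03]])  # made-up gradient

# Same update as in fit(): weight-decay term plus the scaled gradient step.
W -= (eta * lambda_val) / m * W + eta * (dW / m)
print(W)
```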
[
"def print_accuracy(): \n Y_pred_train = model.predict(X_train)\n Y_pred_train = np.argmax(Y_pred_train,1)\n Y_pred_val = model.predict(X_val)\n Y_pred_val = np.argmax(Y_pred_val,1)\n accuracy_train = accuracy_score(Y_pred_train, Y_train)\n accuracy_val = accuracy_score(Y_pred_val, Y_val)\n print(\"Training accuracy\", round(accuracy_train, 4))\n print(\"Validation accuracy\", round(accuracy_val, 4))\n \n if False:\n plt.scatter(X_train[:,0], X_train[:,1], c=Y_pred_train, cmap=my_cmap, s=15*(np.abs(np.sign(Y_pred_train-Y_train))+.1))\n plt.show()",
"_____no_output_____"
],
[
"model = FFNetwork(num_hidden=1)\nmodel.fit(X_train, y_OH_train, epochs=100, eta=0.1)\nprint_accuracy()",
"_____no_output_____"
],
[
"model = FFNetwork(num_hidden=2)\nmodel.fit(X_train, y_OH_train, epochs=100, eta=1, display_loss=False)\nprint_accuracy()",
"_____no_output_____"
],
[
"model = FFNetwork(num_hidden=4)\nmodel.fit(X_train, y_OH_train, epochs=400, eta=0.25, display_loss=False)\nprint_accuracy()",
"_____no_output_____"
],
[
"model = FFNetwork(num_hidden=8)\nmodel.fit(X_train, y_OH_train, epochs=500, eta=0.2, display_loss=False)\nprint_accuracy()",
"_____no_output_____"
],
[
"model = FFNetwork(num_hidden=32)\nmodel.fit(X_train, y_OH_train, epochs=500, eta=0.2, display_loss=False)\nprint_accuracy()",
"_____no_output_____"
],
[
"model = FFNetwork(num_hidden=64)\nmodel.fit(X_train, y_OH_train, epochs=2000, eta=0.1, l2_norm=False)\nprint_accuracy()",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
],
[
[
"## Add L2 Regularization",
"_____no_output_____"
]
],
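For reference, a sketch of the objective that the `l2_norm` option corresponds to, up to the exact scaling used in `fit`: the cross-entropy loss plus a squared-weight penalty, with lambda standing for `lambda_val` and m for the number of training examples:

```latex
\mathcal{L}(W) = \mathcal{L}_{\mathrm{CE}}(W) + \frac{\lambda}{2m}\,\lVert W \rVert_2^2
\quad\Longrightarrow\quad
W \leftarrow W - \eta\left(\frac{1}{m}\,\nabla_W \mathcal{L}_{\mathrm{CE}} + \frac{\lambda}{m}\,W\right)
```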
[
[
"model = FFNetwork(num_hidden=64)\nmodel.fit(X_train, y_OH_train, epochs=2000, eta=0.1, l2_norm=True, lambda_val=0.1, display_loss=True)\nprint_accuracy()",
"_____no_output_____"
],
[
"model = FFNetwork(num_hidden=64)\nmodel.fit(X_train, y_OH_train, epochs=2000, eta=0.1, l2_norm=True, lambda_val=1, display_loss=True)\nprint_accuracy()",
"_____no_output_____"
],
[
"model = FFNetwork(num_hidden=64)\nmodel.fit(X_train, y_OH_train, epochs=2000, eta=0.1, l2_norm=True, lambda_val=5, display_loss=True)\nprint_accuracy()",
"_____no_output_____"
],
[
"model = FFNetwork(num_hidden=64)\nmodel.fit(X_train, y_OH_train, epochs=2000, eta=0.1, l2_norm=True, lambda_val=10, display_loss=True)\nprint_accuracy()",
"_____no_output_____"
]
],
[
[
"\n## Add noise to training data set",
"_____no_output_____"
]
],
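A self-contained sketch of the multiplicative-noise scheme used in the sweep below: each feature value is scaled by `(1 - noise_fraction * z)` with `z` drawn from a standard normal.

```python
import numpy as np

rng = np.random.RandomState(0)
X = np.array([[5.1, 3.5], [4.9, 3.0]])  # two hypothetical training points
noise_fraction = 0.1

# Same transform as X_train_noisy below, applied to the toy X.
X_noisy = X * (1 - noise_fraction * rng.randn(*X.shape))
print(X_noisy)
```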
[
[
"model = FFNetwork(num_hidden=64)\nmodel.fit(X_train, y_OH_train, epochs=2000, eta=0.1, l2_norm=False)\nprint_accuracy()",
"_____no_output_____"
],
[
"for noise_fraction in [0.01, 0.05, 0.1, 0.15, 0.18, 0.2]:\n print(noise_fraction)\n X_train_noisy = X_train * (1 - noise_fraction*np.random.randn(X_train.shape[0], X_train.shape[1]))\n model = FFNetwork(num_hidden=64)\n model.fit(X_train_noisy, y_OH_train, epochs=2000, eta=0.1, l2_norm=False)\n print_accuracy()",
"0.01\n"
]
],
[
[
"## Early stopping",
"_____no_output_____"
]
],
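The two cells below emulate early stopping simply by comparing a long run against a shorter one. For reference, a minimal standalone sketch of the usual patience-based variant (not part of `FFNetwork`), with a made-up validation-loss curve:

```python
# Made-up per-epoch validation losses.
val_losses = [0.9, 0.7, 0.6, 0.61, 0.62, 0.63, 0.64]

best, patience, wait = float('inf'), 2, 0
stop_epoch = None
for epoch, loss in enumerate(val_losses):
    if loss < best:
        best, wait = loss, 0  # improvement: reset the patience counter
    else:
        wait += 1             # no improvement this epoch
        if wait > patience:
            stop_epoch = epoch
            break
print('stop at epoch', stop_epoch)  # stops after >2 epochs without improvement
```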
[
[
"model = FFNetwork(num_hidden=32)\nmodel.fit(X_train, y_OH_train, epochs=500, eta=0.2, display_loss=True)\nprint_accuracy()",
"_____no_output_____"
],
[
"model = FFNetwork(num_hidden=32)\nmodel.fit(X_train, y_OH_train, epochs=100, eta=0.2, display_loss=True)\nprint_accuracy()",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a1985e24084583721b78df63e08e7b549edb35e
| 933,437 |
ipynb
|
Jupyter Notebook
|
2. Define the Network Architecture.ipynb
|
aarti9/P1_Facial_Keypoints
|
50d683d3756d79012b36c19c813f32c0d6a4546c
|
[
"MIT"
] | null | null | null |
2. Define the Network Architecture.ipynb
|
aarti9/P1_Facial_Keypoints
|
50d683d3756d79012b36c19c813f32c0d6a4546c
|
[
"MIT"
] | null | null | null |
2. Define the Network Architecture.ipynb
|
aarti9/P1_Facial_Keypoints
|
50d683d3756d79012b36c19c813f32c0d6a4546c
|
[
"MIT"
] | 1 |
2020-05-24T14:52:03.000Z
|
2020-05-24T14:52:03.000Z
| 128.696677 | 142,276 | 0.826328 |
[
[
[
"## Define the Convolutional Neural Network\n\nAfter you've looked at the data you're working with and, in this case, know the shapes of the images and of the keypoints, you are ready to define a convolutional neural network that can *learn* from this data.\n\nIn this notebook and in `models.py`, you will:\n1. Define a CNN with images as input and keypoints as output\n2. Construct the transformed FaceKeypointsDataset, just as before\n3. Train the CNN on the training data, tracking loss\n4. See how the trained model performs on test data\n5. If necessary, modify the CNN structure and model hyperparameters, so that it performs *well* **\\***\n\n**\\*** What does *well* mean?\n\n\"Well\" means that the model's loss decreases during training **and**, when applied to test image data, the model produces keypoints that closely match the true keypoints of each face. And you'll see examples of this later in the notebook.\n\n---\n",
"_____no_output_____"
],
[
"## CNN Architecture\n\nRecall that CNN's are defined by a few types of layers:\n* Convolutional layers\n* Maxpooling layers\n* Fully-connected layers\n\nYou are required to use the above layers and encouraged to add multiple convolutional layers and things like dropout layers that may prevent overfitting. You are also encouraged to look at literature on keypoint detection, such as [this paper](https://arxiv.org/pdf/1710.00977.pdf), to help you determine the structure of your network.\n\n\n### TODO: Define your model in the provided file `models.py` file\n\nThis file is mostly empty but contains the expected name and some TODO's for creating your model.\n\n---",
"_____no_output_____"
],
[
"## PyTorch Neural Nets\n\nTo define a neural network in PyTorch, you define the layers of a model in the function `__init__` and define the feedforward behavior of a network that employs those initialized layers in the function `forward`, which takes in an input image tensor, `x`. The structure of this Net class is shown below and left for you to fill in.\n\nNote: During training, PyTorch will be able to perform backpropagation by keeping track of the network's feedforward behavior and using autograd to calculate the update to the weights in the network.\n\n#### Define the Layers in ` __init__`\nAs a reminder, a conv/pool layer may be defined like this (in `__init__`):\n```\n# 1 input image channel (for grayscale images), 32 output channels/feature maps, 3x3 square convolution kernel\nself.conv1 = nn.Conv2d(1, 32, 3)\n\n# maxpool that uses a square window of kernel_size=2, stride=2\nself.pool = nn.MaxPool2d(2, 2) \n```\n\n#### Refer to Layers in `forward`\nThen referred to in the `forward` function like this, in which the conv1 layer has a ReLu activation applied to it before maxpooling is applied:\n```\nx = self.pool(F.relu(self.conv1(x)))\n```\n\nBest practice is to place any layers whose weights will change during the training process in `__init__` and refer to them in the `forward` function; any layers or functions that always behave in the same way, such as a pre-defined activation function, should appear *only* in the `forward` function.",
"_____no_output_____"
],
[
"#### Why models.py\n\nYou are tasked with defining the network in the `models.py` file so that any models you define can be saved and loaded by name in different notebooks in this project directory. For example, by defining a CNN class called `Net` in `models.py`, you can then create that same architecture in this and other notebooks by simply importing the class and instantiating a model:\n```\n from models import Net\n net = Net()\n```",
"_____no_output_____"
]
],
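For orientation, a minimal sketch of the kind of `Net` class `models.py` asks for; it assumes 224x224 grayscale inputs and 68 (x, y) keypoint pairs (136 outputs), and the layer sizes are illustrative rather than the project solution:

```python
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # 1 input channel (grayscale), 32 feature maps, 5x5 kernel
        self.conv1 = nn.Conv2d(1, 32, 5)
        self.pool = nn.MaxPool2d(2, 2)
        # 224x224 input -> 220x220 after conv -> 110x110 after pooling
        self.fc1 = nn.Linear(32 * 110 * 110, 136)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))  # conv -> ReLU -> maxpool
        x = x.view(x.size(0), -1)             # flatten for the linear layer
        return self.fc1(x)                    # 136 keypoint coordinates
```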
[
[
"# load the data if you need to; if you have already loaded the data, you may comment this cell out\n# -- DO NOT CHANGE THIS CELL -- #\n!mkdir /data\n!wget -P /data/ https://s3.amazonaws.com/video.udacity-data.com/topher/2018/May/5aea1b91_train-test-data/train-test-data.zip\n!unzip -n /data/train-test-data.zip -d /data",
"--2019-05-15 14:37:57-- https://s3.amazonaws.com/video.udacity-data.com/topher/2018/May/5aea1b91_train-test-data/train-test-data.zip\nResolving s3.amazonaws.com (s3.amazonaws.com)... 52.216.112.141\nConnecting to s3.amazonaws.com (s3.amazonaws.com)|52.216.112.141|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 338613624 (323M) [application/zip]\nSaving to: ‘/data/train-test-data.zip’\n\ntrain-test-data.zip 100%[===================>] 322.93M 74.8MB/s in 4.5s \n\n2019-05-15 14:38:02 (72.2 MB/s) - ‘/data/train-test-data.zip’ saved [338613624/338613624]\n\nArchive: /data/train-test-data.zip\n creating: /data/test/\n inflating: /data/test/Abdel_Aziz_Al-Hakim_00.jpg \n inflating: /data/test/Abdel_Aziz_Al-Hakim_01.jpg \n inflating: /data/test/Abdel_Aziz_Al-Hakim_10.jpg \n inflating: /data/test/Abdel_Aziz_Al-Hakim_11.jpg \n inflating: /data/test/Abdel_Aziz_Al-Hakim_40.jpg \n inflating: /data/test/Abdel_Aziz_Al-Hakim_41.jpg \n inflating: /data/test/Abdullah_Gul_10.jpg \n inflating: /data/test/Abdullah_Gul_11.jpg \n inflating: /data/test/Abdullah_Gul_30.jpg \n inflating: /data/test/Abdullah_Gul_31.jpg \n inflating: /data/test/Abdullah_Gul_50.jpg \n inflating: /data/test/Abdullah_Gul_51.jpg \n inflating: /data/test/Adam_Sandler_00.jpg \n inflating: /data/test/Adam_Sandler_01.jpg \n inflating: /data/test/Adam_Sandler_10.jpg \n inflating: /data/test/Adam_Sandler_11.jpg \n inflating: /data/test/Adam_Sandler_40.jpg \n inflating: /data/test/Adam_Sandler_41.jpg \n inflating: /data/test/Adrian_Nastase_10.jpg \n inflating: /data/test/Adrian_Nastase_11.jpg \n inflating: /data/test/Adrian_Nastase_40.jpg \n inflating: /data/test/Adrian_Nastase_41.jpg \n inflating: /data/test/Adrian_Nastase_50.jpg \n inflating: /data/test/Adrian_Nastase_51.jpg \n inflating: /data/test/Agbani_Darego_00.jpg \n inflating: /data/test/Agbani_Darego_01.jpg \n inflating: /data/test/Agbani_Darego_20.jpg \n inflating: /data/test/Agbani_Darego_21.jpg \n inflating: /data/test/Agbani_Darego_40.jpg \n inflating: /data/test/Agbani_Darego_41.jpg \n inflating: /data/test/Agbani_Darego_50.jpg \n inflating: /data/test/Agbani_Darego_51.jpg \n inflating: /data/test/Agnes_Bruckner_00.jpg \n inflating: /data/test/Agnes_Bruckner_01.jpg \n inflating: /data/test/Agnes_Bruckner_10.jpg \n inflating: /data/test/Agnes_Bruckner_11.jpg \n inflating: /data/test/Agnes_Bruckner_20.jpg \n inflating: /data/test/Agnes_Bruckner_21.jpg \n inflating: /data/test/Agnes_Bruckner_40.jpg \n inflating: /data/test/Agnes_Bruckner_41.jpg \n inflating: /data/test/Ahmad_Masood_00.jpg \n inflating: /data/test/Ahmad_Masood_01.jpg \n inflating: /data/test/Ahmad_Masood_30.jpg \n inflating: /data/test/Ahmad_Masood_31.jpg \n inflating: /data/test/Ahmad_Masood_40.jpg \n inflating: /data/test/Ahmad_Masood_41.jpg \n inflating: /data/test/Ahmed_Ahmed_00.jpg \n inflating: /data/test/Ahmed_Ahmed_01.jpg \n inflating: /data/test/Ahmed_Ahmed_10.jpg \n inflating: /data/test/Ahmed_Ahmed_11.jpg \n inflating: /data/test/Ahmed_Ahmed_40.jpg \n inflating: /data/test/Ahmed_Ahmed_41.jpg \n inflating: /data/test/Ahmed_Ahmed_50.jpg \n inflating: /data/test/Ahmed_Ahmed_51.jpg \n inflating: /data/test/Aidan_Quinn_00.jpg \n inflating: /data/test/Aidan_Quinn_01.jpg \n inflating: /data/test/Aidan_Quinn_10.jpg \n inflating: /data/test/Aidan_Quinn_11.jpg \n inflating: /data/test/Aidan_Quinn_20.jpg \n inflating: /data/test/Aidan_Quinn_21.jpg \n inflating: /data/test/Aidan_Quinn_30.jpg \n inflating: /data/test/Aidan_Quinn_31.jpg \n inflating: /data/test/Aishwarya_Rai_00.jpg \n 
/data/test/Fidel_Castro_Daiz-Balart_41.jpg \n inflating: /data/test/Flavia_Pennetta_30.jpg \n inflating: /data/test/Flavia_Pennetta_31.jpg \n inflating: /data/test/Flavia_Pennetta_40.jpg \n inflating: /data/test/Flavia_Pennetta_41.jpg \n inflating: /data/test/Flavia_Pennetta_50.jpg \n inflating: /data/test/Flavia_Pennetta_51.jpg \n inflating: /data/test/Florecita_Cobian_00.jpg \n inflating: /data/test/Florecita_Cobian_01.jpg \n inflating: /data/test/Florecita_Cobian_10.jpg \n inflating: /data/test/Florecita_Cobian_11.jpg \n inflating: /data/test/Florecita_Cobian_20.jpg \n inflating: /data/test/Florecita_Cobian_21.jpg \n inflating: /data/test/Frances_Fisher_20.jpg \n inflating: /data/test/Frances_Fisher_21.jpg \n inflating: /data/test/Frances_Fisher_30.jpg \n inflating: /data/test/Frances_Fisher_31.jpg \n inflating: /data/test/Frances_Fisher_40.jpg \n inflating: /data/test/Frances_Fisher_41.jpg \n inflating: /data/test/Francis_Collins_00.jpg \n inflating: /data/test/Francis_Collins_01.jpg \n inflating: /data/test/Francis_Collins_10.jpg \n inflating: /data/test/Francis_Collins_11.jpg \n inflating: /data/test/Francis_Collins_20.jpg \n inflating: /data/test/Francis_Collins_21.jpg \n inflating: /data/test/Francis_Collins_40.jpg \n inflating: /data/test/Francis_Collins_41.jpg \n inflating: /data/test/Frank_Beamer_00.jpg \n inflating: /data/test/Frank_Beamer_01.jpg \n inflating: /data/test/Frank_Beamer_20.jpg \n inflating: /data/test/Frank_Beamer_21.jpg \n inflating: /data/test/Frank_Beamer_30.jpg \n inflating: /data/test/Frank_Beamer_31.jpg \n inflating: /data/test/Frank_Caliendo_10.jpg \n inflating: /data/test/Frank_Caliendo_11.jpg \n inflating: /data/test/Frank_Caliendo_30.jpg \n inflating: /data/test/Frank_Caliendo_31.jpg \n inflating: /data/test/Frank_Caliendo_40.jpg \n inflating: /data/test/Frank_Caliendo_41.jpg \n inflating: /data/test/Frank_Caliendo_50.jpg \n inflating: /data/test/Frank_Caliendo_51.jpg \n inflating: /data/test/Frank_Keating_30.jpg \n inflating: /data/test/Frank_Keating_31.jpg \n inflating: /data/test/Frank_Keating_40.jpg \n inflating: /data/test/Frank_Keating_41.jpg \n inflating: /data/test/Frank_Keating_50.jpg \n inflating: /data/test/Frank_Keating_51.jpg \n inflating: /data/test/Frank_Solich_10.jpg \n inflating: /data/test/Frank_Solich_11.jpg \n inflating: /data/test/Frank_Solich_20.jpg \n inflating: /data/test/Frank_Solich_21.jpg \n inflating: /data/test/Frank_Solich_30.jpg \n inflating: /data/test/Frank_Solich_31.jpg \n inflating: /data/test/Franz_Fischler_00.jpg \n inflating: /data/test/Franz_Fischler_01.jpg \n inflating: /data/test/Franz_Fischler_30.jpg \n inflating: /data/test/Franz_Fischler_31.jpg \n inflating: /data/test/Franz_Fischler_40.jpg \n inflating: /data/test/Franz_Fischler_41.jpg \n inflating: /data/test/Franz_Fischler_50.jpg \n inflating: /data/test/Franz_Fischler_51.jpg \n inflating: /data/test/Gabi_Zimmer_00.jpg \n inflating: /data/test/Gabi_Zimmer_01.jpg \n inflating: /data/test/Gabi_Zimmer_10.jpg \n inflating: /data/test/Gabi_Zimmer_11.jpg \n inflating: /data/test/Gabi_Zimmer_20.jpg \n inflating: /data/test/Gabi_Zimmer_21.jpg \n inflating: /data/test/Gabi_Zimmer_50.jpg \n inflating: /data/test/Gabi_Zimmer_51.jpg \n inflating: /data/test/Gary_Bettman_10.jpg \n inflating: /data/test/Gary_Bettman_11.jpg \n inflating: /data/test/Gary_Bettman_30.jpg \n inflating: /data/test/Gary_Bettman_31.jpg \n inflating: /data/test/Gary_Bettman_40.jpg \n inflating: /data/test/Gary_Bettman_41.jpg \n inflating: /data/test/Gary_Coleman_30.jpg \n inflating: 
/data/test/Gary_Coleman_31.jpg \n inflating: /data/test/Gary_Coleman_40.jpg \n inflating: /data/test/Gary_Coleman_41.jpg \n inflating: /data/test/Gary_Coleman_50.jpg \n inflating: /data/test/Gary_Coleman_51.jpg \n inflating: /data/test/Gary_Condit_00.jpg \n inflating: /data/test/Gary_Condit_01.jpg \n inflating: /data/test/Gary_Condit_10.jpg \n inflating: /data/test/Gary_Condit_11.jpg \n inflating: /data/test/Gary_Condit_30.jpg \n inflating: /data/test/Gary_Condit_31.jpg \n inflating: /data/test/Gene_Hackman_20.jpg \n inflating: /data/test/Gene_Hackman_21.jpg \n inflating: /data/test/Gene_Hackman_30.jpg \n inflating: /data/test/Gene_Hackman_31.jpg \n inflating: /data/test/Gene_Hackman_40.jpg \n inflating: /data/test/Gene_Hackman_41.jpg \n inflating: /data/test/Geoffrey_Rush_00.jpg \n inflating: /data/test/Geoffrey_Rush_01.jpg \n inflating: /data/test/Geoffrey_Rush_10.jpg \n inflating: /data/test/Geoffrey_Rush_11.jpg \n inflating: /data/test/Geoffrey_Rush_20.jpg \n inflating: /data/test/Geoffrey_Rush_21.jpg \n inflating: /data/test/George_Galloway_00.jpg \n inflating: /data/test/George_Galloway_01.jpg \n inflating: /data/test/George_Galloway_20.jpg \n inflating: /data/test/George_Galloway_21.jpg \n inflating: /data/test/George_Galloway_40.jpg \n inflating: /data/test/George_Galloway_41.jpg \n inflating: /data/test/George_Galloway_50.jpg \n inflating: /data/test/George_Galloway_51.jpg \n inflating: /data/test/George_Karl_10.jpg \n inflating: /data/test/George_Karl_11.jpg \n inflating: /data/test/George_Karl_20.jpg \n inflating: /data/test/George_Karl_21.jpg \n inflating: /data/test/George_Karl_50.jpg \n inflating: /data/test/George_Karl_51.jpg \n inflating: /data/test/GL_Peiris_00.jpg \n inflating: /data/test/GL_Peiris_01.jpg \n inflating: /data/test/GL_Peiris_10.jpg \n inflating: /data/test/GL_Peiris_11.jpg \n inflating: /data/test/GL_Peiris_30.jpg \n inflating: /data/test/GL_Peiris_31.jpg \n inflating: /data/test/Hanan_Ashrawi_10.jpg \n inflating: /data/test/Hanan_Ashrawi_11.jpg \n inflating: /data/test/Hanan_Ashrawi_20.jpg \n inflating: /data/test/Hanan_Ashrawi_21.jpg \n inflating: /data/test/Hanan_Ashrawi_40.jpg \n inflating: /data/test/Hanan_Ashrawi_41.jpg \n inflating: /data/test/Harrison_Ford_10.jpg \n inflating: /data/test/Harrison_Ford_11.jpg \n inflating: /data/test/Harrison_Ford_20.jpg \n inflating: /data/test/Harrison_Ford_21.jpg \n inflating: /data/test/Harrison_Ford_50.jpg \n inflating: /data/test/Harrison_Ford_51.jpg \n inflating: /data/test/Hassan_Nasrallah_30.jpg \n inflating: /data/test/Hassan_Nasrallah_31.jpg \n inflating: /data/test/Hassan_Nasrallah_40.jpg \n inflating: /data/test/Hassan_Nasrallah_41.jpg \n inflating: /data/test/Hassan_Nasrallah_50.jpg \n inflating: /data/test/Hassan_Nasrallah_51.jpg \n inflating: /data/test/Irene_Kahn_00.jpg \n inflating: /data/test/Irene_Kahn_01.jpg \n inflating: /data/test/Irene_Kahn_30.jpg \n inflating: /data/test/Irene_Kahn_31.jpg \n inflating: /data/test/Irene_Kahn_40.jpg \n inflating: /data/test/Irene_Kahn_41.jpg \n inflating: /data/test/Isabella_Rossellini_00.jpg \n inflating: /data/test/Isabella_Rossellini_01.jpg \n inflating: /data/test/Isabella_Rossellini_10.jpg \n inflating: /data/test/Isabella_Rossellini_11.jpg \n inflating: /data/test/Isabella_Rossellini_20.jpg \n inflating: /data/test/Isabella_Rossellini_21.jpg \n inflating: /data/test/Isabelle_Huppert_20.jpg \n inflating: /data/test/Isabelle_Huppert_21.jpg \n inflating: /data/test/Isabelle_Huppert_30.jpg \n inflating: /data/test/Isabelle_Huppert_31.jpg \n inflating: 
/data/test/Isabelle_Huppert_40.jpg \n inflating: /data/test/Isabelle_Huppert_41.jpg \n inflating: /data/test/Itzhak_Perlman_10.jpg \n inflating: /data/test/Itzhak_Perlman_11.jpg \n inflating: /data/test/Itzhak_Perlman_30.jpg \n inflating: /data/test/Itzhak_Perlman_31.jpg \n inflating: /data/test/Itzhak_Perlman_40.jpg \n inflating: /data/test/Itzhak_Perlman_41.jpg \n inflating: /data/test/Jack_Welch_10.jpg \n inflating: /data/test/Jack_Welch_11.jpg \n inflating: /data/test/Jack_Welch_30.jpg \n inflating: /data/test/Jack_Welch_31.jpg \n inflating: /data/test/Jack_Welch_40.jpg \n inflating: /data/test/Jack_Welch_41.jpg \n inflating: /data/test/Jack_Welch_50.jpg \n inflating: /data/test/Jack_Welch_51.jpg \n inflating: /data/test/Jackie_Sherrill_20.jpg \n inflating: /data/test/Jackie_Sherrill_21.jpg \n inflating: /data/test/Jackie_Sherrill_40.jpg \n inflating: /data/test/Jackie_Sherrill_41.jpg \n inflating: /data/test/Jackie_Sherrill_50.jpg \n inflating: /data/test/Jackie_Sherrill_51.jpg \n inflating: /data/test/Jacqueline_Gold_00.jpg \n inflating: /data/test/Jacqueline_Gold_01.jpg \n inflating: /data/test/Jacqueline_Gold_20.jpg \n inflating: /data/test/Jacqueline_Gold_21.jpg \n inflating: /data/test/Jacqueline_Gold_30.jpg \n inflating: /data/test/Jacqueline_Gold_31.jpg \n inflating: /data/test/Jafar_Umar_Thalib_00.jpg \n inflating: /data/test/Jafar_Umar_Thalib_01.jpg \n inflating: /data/test/Jafar_Umar_Thalib_20.jpg \n inflating: /data/test/Jafar_Umar_Thalib_21.jpg \n inflating: /data/test/Jafar_Umar_Thalib_30.jpg \n inflating: /data/test/Jafar_Umar_Thalib_31.jpg \n inflating: /data/test/Jafar_Umar_Thalib_50.jpg \n inflating: /data/test/Jafar_Umar_Thalib_51.jpg \n inflating: /data/test/Jaime_Pressly_00.jpg \n inflating: /data/test/Jaime_Pressly_01.jpg \n inflating: /data/test/Jaime_Pressly_10.jpg \n inflating: /data/test/Jaime_Pressly_11.jpg \n inflating: /data/test/Jaime_Pressly_40.jpg \n inflating: /data/test/Jaime_Pressly_41.jpg \n inflating: /data/test/Jake_Gyllenhaal_00.jpg \n inflating: /data/test/Jake_Gyllenhaal_01.jpg \n inflating: /data/test/Jake_Gyllenhaal_40.jpg \n inflating: /data/test/Jake_Gyllenhaal_41.jpg \n inflating: /data/test/Jake_Gyllenhaal_50.jpg \n inflating: /data/test/Jake_Gyllenhaal_51.jpg \n inflating: /data/test/Jake_Plummer_20.jpg \n inflating: /data/test/Jake_Plummer_21.jpg \n inflating: /data/test/Jake_Plummer_40.jpg \n inflating: /data/test/Jake_Plummer_41.jpg \n inflating: /data/test/Jake_Plummer_50.jpg \n inflating: /data/test/Jake_Plummer_51.jpg \n inflating: /data/test/James_Carville_00.jpg \n inflating: /data/test/James_Carville_01.jpg \n inflating: /data/test/James_Carville_10.jpg \n inflating: /data/test/James_Carville_11.jpg \n inflating: /data/test/James_Carville_30.jpg \n inflating: /data/test/James_Carville_31.jpg \n inflating: /data/test/James_Carville_50.jpg \n inflating: /data/test/James_Carville_51.jpg \n inflating: /data/test/James_Cunningham_00.jpg \n inflating: /data/test/James_Cunningham_01.jpg \n inflating: /data/test/James_Cunningham_20.jpg \n inflating: /data/test/James_Cunningham_21.jpg \n inflating: /data/test/James_Cunningham_30.jpg \n inflating: /data/test/James_Cunningham_31.jpg \n inflating: /data/test/James_Cunningham_40.jpg \n inflating: /data/test/James_Cunningham_41.jpg \n inflating: /data/test/James_Hoffa_10.jpg \n inflating: /data/test/James_Hoffa_11.jpg \n inflating: /data/test/James_Hoffa_20.jpg \n inflating: /data/test/James_Hoffa_21.jpg \n inflating: /data/test/James_Hoffa_40.jpg \n inflating: /data/test/James_Hoffa_41.jpg \n 
inflating: /data/test/James_Hoffa_50.jpg \n inflating: /data/test/James_Hoffa_51.jpg \n inflating: /data/test/James_Lockhart_00.jpg \n inflating: /data/test/James_Lockhart_01.jpg \n inflating: /data/test/James_Lockhart_10.jpg \n inflating: /data/test/James_Lockhart_11.jpg \n inflating: /data/test/James_Lockhart_50.jpg \n inflating: /data/test/James_Lockhart_51.jpg \n inflating: /data/test/James_McPherson_00.jpg \n inflating: /data/test/James_McPherson_01.jpg \n inflating: /data/test/James_McPherson_10.jpg \n inflating: /data/test/James_McPherson_11.jpg \n inflating: /data/test/James_McPherson_20.jpg \n inflating: /data/test/James_McPherson_21.jpg \n inflating: /data/test/James_Wolfensohn_00.jpg \n inflating: /data/test/James_Wolfensohn_01.jpg \n inflating: /data/test/James_Wolfensohn_20.jpg \n inflating: /data/test/James_Wolfensohn_21.jpg \n inflating: /data/test/James_Wolfensohn_30.jpg \n inflating: /data/test/James_Wolfensohn_31.jpg \n inflating: /data/test/James_Wolfensohn_50.jpg \n inflating: /data/test/James_Wolfensohn_51.jpg \n inflating: /data/test/Jan_Peter_Balkenende_00.jpg \n inflating: /data/test/Jan_Peter_Balkenende_01.jpg \n inflating: /data/test/Jan_Peter_Balkenende_10.jpg \n inflating: /data/test/Jan_Peter_Balkenende_11.jpg \n inflating: /data/test/Jan_Peter_Balkenende_30.jpg \n inflating: /data/test/Jan_Peter_Balkenende_31.jpg \n inflating: /data/test/Jan_Peter_Balkenende_50.jpg \n inflating: /data/test/Jan_Peter_Balkenende_51.jpg \n inflating: /data/test/Jane_Krakowski_00.jpg \n inflating: /data/test/Jane_Krakowski_01.jpg \n inflating: /data/test/Jane_Krakowski_10.jpg \n inflating: /data/test/Jane_Krakowski_11.jpg \n inflating: /data/test/Jane_Krakowski_40.jpg \n inflating: /data/test/Jane_Krakowski_41.jpg \n inflating: /data/test/Jane_Krakowski_50.jpg \n inflating: /data/test/Jane_Krakowski_51.jpg \n inflating: /data/test/Jane_Pauley_10.jpg \n inflating: /data/test/Jane_Pauley_11.jpg \n inflating: /data/test/Jane_Pauley_30.jpg \n inflating: /data/test/Jane_Pauley_31.jpg \n inflating: /data/test/Jane_Pauley_40.jpg \n inflating: /data/test/Jane_Pauley_41.jpg \n inflating: /data/test/Jane_Rooney_00.jpg \n inflating: /data/test/Jane_Rooney_01.jpg \n inflating: /data/test/Jane_Rooney_10.jpg \n inflating: /data/test/Jane_Rooney_11.jpg \n inflating: /data/test/Jane_Rooney_20.jpg \n inflating: /data/test/Jane_Rooney_21.jpg \n inflating: /data/test/Janis_Ruth_Coulter_00.jpg \n inflating: /data/test/Janis_Ruth_Coulter_01.jpg \n inflating: /data/test/Janis_Ruth_Coulter_20.jpg \n inflating: /data/test/Janis_Ruth_Coulter_21.jpg \n inflating: /data/test/Janis_Ruth_Coulter_40.jpg \n inflating: /data/test/Janis_Ruth_Coulter_41.jpg \n inflating: /data/test/Janis_Ruth_Coulter_50.jpg \n inflating: /data/test/Janis_Ruth_Coulter_51.jpg \n inflating: /data/test/JK_Rowling_20.jpg \n inflating: /data/test/JK_Rowling_21.jpg \n inflating: /data/test/JK_Rowling_30.jpg \n inflating: /data/test/JK_Rowling_31.jpg \n inflating: /data/test/JK_Rowling_40.jpg \n inflating: /data/test/JK_Rowling_41.jpg \n inflating: /data/test/JK_Rowling_50.jpg \n inflating: /data/test/JK_Rowling_51.jpg \n inflating: /data/test/Kate_Capshaw_10.jpg \n inflating: /data/test/Kate_Capshaw_11.jpg \n inflating: /data/test/Kate_Capshaw_20.jpg \n inflating: /data/test/Kate_Capshaw_21.jpg \n inflating: /data/test/Kate_Capshaw_40.jpg \n inflating: /data/test/Kate_Capshaw_41.jpg \n inflating: /data/test/Kate_Winslet_00.jpg \n inflating: /data/test/Kate_Winslet_01.jpg \n inflating: /data/test/Kate_Winslet_10.jpg \n inflating: 
/data/test/Kate_Winslet_11.jpg \n inflating: /data/test/Kate_Winslet_50.jpg \n inflating: /data/test/Kate_Winslet_51.jpg \n inflating: /data/test/Katharine_Hepburn_10.jpg \n inflating: /data/test/Katharine_Hepburn_11.jpg \n inflating: /data/test/Katharine_Hepburn_30.jpg \n inflating: /data/test/Katharine_Hepburn_31.jpg \n inflating: /data/test/Katharine_Hepburn_40.jpg \n inflating: /data/test/Katharine_Hepburn_41.jpg \n inflating: /data/test/Kathryn_Morris_10.jpg \n inflating: /data/test/Kathryn_Morris_11.jpg \n inflating: /data/test/Kathryn_Morris_20.jpg \n inflating: /data/test/Kathryn_Morris_21.jpg \n inflating: /data/test/Kathryn_Morris_40.jpg \n inflating: /data/test/Kathryn_Morris_41.jpg \n inflating: /data/test/Kathryn_Morris_50.jpg \n inflating: /data/test/Kathryn_Morris_51.jpg \n inflating: /data/test/Katja_Riemann_00.jpg \n inflating: /data/test/Katja_Riemann_01.jpg \n inflating: /data/test/Katja_Riemann_10.jpg \n inflating: /data/test/Katja_Riemann_11.jpg \n inflating: /data/test/Katja_Riemann_20.jpg \n inflating: /data/test/Katja_Riemann_21.jpg \n inflating: /data/test/Keith_Olbermann_00.jpg \n inflating: /data/test/Keith_Olbermann_01.jpg \n inflating: /data/test/Keith_Olbermann_10.jpg \n inflating: /data/test/Keith_Olbermann_11.jpg \n inflating: /data/test/Keith_Olbermann_20.jpg \n inflating: /data/test/Keith_Olbermann_21.jpg \n inflating: /data/test/Keith_Olbermann_50.jpg \n inflating: /data/test/Keith_Olbermann_51.jpg \n inflating: /data/test/Keith_Tyson_00.jpg \n inflating: /data/test/Keith_Tyson_01.jpg \n inflating: /data/test/Keith_Tyson_10.jpg \n inflating: /data/test/Keith_Tyson_11.jpg \n inflating: /data/test/Keith_Tyson_20.jpg \n inflating: /data/test/Keith_Tyson_21.jpg \n inflating: /data/test/Kemal_Dervis_00.jpg \n inflating: /data/test/Kemal_Dervis_01.jpg \n inflating: /data/test/Kemal_Dervis_10.jpg \n inflating: /data/test/Kemal_Dervis_11.jpg \n inflating: /data/test/Kemal_Dervis_30.jpg \n inflating: /data/test/Kemal_Dervis_31.jpg \n inflating: /data/test/Kevin_Satterfield_00.jpg \n inflating: /data/test/Kevin_Satterfield_01.jpg \n inflating: /data/test/Kevin_Satterfield_10.jpg \n inflating: /data/test/Kevin_Satterfield_11.jpg \n inflating: /data/test/Kevin_Satterfield_20.jpg \n inflating: /data/test/Kevin_Satterfield_21.jpg \n inflating: /data/test/Kieran_Culkin_00.jpg \n inflating: /data/test/Kieran_Culkin_01.jpg \n inflating: /data/test/Kieran_Culkin_10.jpg \n inflating: /data/test/Kieran_Culkin_11.jpg \n inflating: /data/test/Kieran_Culkin_20.jpg \n inflating: /data/test/Kieran_Culkin_21.jpg \n inflating: /data/test/Kirk_Ferentz_00.jpg \n inflating: /data/test/Kirk_Ferentz_01.jpg \n inflating: /data/test/Kirk_Ferentz_20.jpg \n inflating: /data/test/Kirk_Ferentz_21.jpg \n inflating: /data/test/Kirk_Ferentz_40.jpg \n inflating: /data/test/Kirk_Ferentz_41.jpg \n inflating: /data/test/Kirk_Ferentz_50.jpg \n inflating: /data/test/Kirk_Ferentz_51.jpg \n inflating: /data/test/Kirsten_Dunst_00.jpg \n inflating: /data/test/Kirsten_Dunst_01.jpg \n inflating: /data/test/Kirsten_Dunst_20.jpg \n inflating: /data/test/Kirsten_Dunst_21.jpg \n inflating: /data/test/Kirsten_Dunst_30.jpg \n inflating: /data/test/Kirsten_Dunst_31.jpg \n inflating: /data/test/Kit_Bond_10.jpg \n inflating: /data/test/Kit_Bond_11.jpg \n inflating: /data/test/Kit_Bond_20.jpg \n inflating: /data/test/Kit_Bond_21.jpg \n inflating: /data/test/Kit_Bond_30.jpg \n inflating: /data/test/Kit_Bond_31.jpg \n inflating: /data/test/Kit_Bond_50.jpg \n inflating: /data/test/Kit_Bond_51.jpg \n inflating: 
/data/test/Kristen_Breitweiser_00.jpg \n inflating: /data/test/Kristen_Breitweiser_01.jpg \n inflating: /data/test/Kristen_Breitweiser_10.jpg \n inflating: /data/test/Kristen_Breitweiser_11.jpg \n inflating: /data/test/Kristen_Breitweiser_20.jpg \n inflating: /data/test/Kristen_Breitweiser_21.jpg \n inflating: /data/test/Kristen_Breitweiser_50.jpg \n inflating: /data/test/Kristen_Breitweiser_51.jpg \n inflating: /data/test/Kristin_Chenoweth_10.jpg \n inflating: /data/test/Kristin_Chenoweth_11.jpg \n inflating: /data/test/Kristin_Chenoweth_40.jpg \n inflating: /data/test/Kristin_Chenoweth_41.jpg \n inflating: /data/test/Kristin_Chenoweth_50.jpg \n inflating: /data/test/Kristin_Chenoweth_51.jpg \n inflating: /data/test/Kristin_Scott_10.jpg \n inflating: /data/test/Kristin_Scott_11.jpg \n inflating: /data/test/Kristin_Scott_40.jpg \n inflating: /data/test/Kristin_Scott_41.jpg \n inflating: /data/test/Kristin_Scott_50.jpg \n inflating: /data/test/Kristin_Scott_51.jpg \n inflating: /data/test/Kristy_Curry_00.jpg \n inflating: /data/test/Kristy_Curry_01.jpg \n inflating: /data/test/Kristy_Curry_20.jpg \n inflating: /data/test/Kristy_Curry_21.jpg \n inflating: /data/test/Kristy_Curry_30.jpg \n inflating: /data/test/Kristy_Curry_31.jpg \n inflating: /data/test/Kurt_Warner_00.jpg \n inflating: /data/test/Kurt_Warner_01.jpg \n inflating: /data/test/Kurt_Warner_10.jpg \n inflating: /data/test/Kurt_Warner_11.jpg \n inflating: /data/test/Kurt_Warner_40.jpg \n inflating: /data/test/Kurt_Warner_41.jpg \n inflating: /data/test/Kweisi_Mfume_00.jpg \n inflating: /data/test/Kweisi_Mfume_01.jpg \n inflating: /data/test/Kweisi_Mfume_10.jpg \n inflating: /data/test/Kweisi_Mfume_11.jpg \n inflating: /data/test/Kweisi_Mfume_40.jpg \n inflating: /data/test/Kweisi_Mfume_41.jpg \n inflating: /data/test/Kweisi_Mfume_50.jpg \n inflating: /data/test/Kweisi_Mfume_51.jpg \n inflating: /data/test/Kyle_Shewfelt_00.jpg \n inflating: /data/test/Kyle_Shewfelt_01.jpg \n inflating: /data/test/Kyle_Shewfelt_10.jpg \n inflating: /data/test/Kyle_Shewfelt_11.jpg \n inflating: /data/test/Kyle_Shewfelt_20.jpg \n inflating: /data/test/Kyle_Shewfelt_21.jpg \n inflating: /data/test/Kyle_Shewfelt_40.jpg \n inflating: /data/test/Kyle_Shewfelt_41.jpg \n inflating: /data/test/Larry_Flynt_00.jpg \n inflating: /data/test/Larry_Flynt_01.jpg \n inflating: /data/test/Larry_Flynt_10.jpg \n inflating: /data/test/Larry_Flynt_11.jpg \n inflating: /data/test/Larry_Flynt_20.jpg \n inflating: /data/test/Larry_Flynt_21.jpg \n inflating: /data/test/Laura_Bozzo_00.jpg \n inflating: /data/test/Laura_Bozzo_01.jpg \n inflating: /data/test/Laura_Bozzo_10.jpg \n inflating: /data/test/Laura_Bozzo_11.jpg \n inflating: /data/test/Laura_Bozzo_40.jpg \n inflating: /data/test/Laura_Bozzo_41.jpg \n inflating: /data/test/Laura_Bush_10.jpg \n inflating: /data/test/Laura_Bush_11.jpg \n inflating: /data/test/Laura_Bush_20.jpg \n inflating: /data/test/Laura_Bush_21.jpg \n inflating: /data/test/Laura_Bush_40.jpg \n inflating: /data/test/Laura_Bush_41.jpg \n inflating: /data/test/Laura_Bush_50.jpg \n inflating: /data/test/Laura_Bush_51.jpg \n inflating: /data/test/Laura_Elena_Harring_00.jpg \n inflating: /data/test/Laura_Elena_Harring_01.jpg \n inflating: /data/test/Laura_Elena_Harring_20.jpg \n inflating: /data/test/Laura_Elena_Harring_21.jpg \n inflating: /data/test/Laura_Elena_Harring_40.jpg \n inflating: /data/test/Laura_Elena_Harring_41.jpg \n inflating: /data/test/Laura_Elena_Harring_50.jpg \n inflating: /data/test/Laura_Elena_Harring_51.jpg \n inflating: 
/data/test/Laurence_Fishburne_20.jpg \n inflating: /data/test/Laurence_Fishburne_21.jpg \n inflating: /data/test/Laurence_Fishburne_40.jpg \n inflating: /data/test/Laurence_Fishburne_41.jpg \n inflating: /data/test/Laurence_Fishburne_50.jpg \n inflating: /data/test/Laurence_Fishburne_51.jpg \n inflating: /data/test/Lee_Baca_00.jpg \n inflating: /data/test/Lee_Baca_01.jpg \n inflating: /data/test/Lee_Baca_10.jpg \n inflating: /data/test/Lee_Baca_11.jpg \n inflating: /data/test/Lee_Baca_40.jpg \n inflating: /data/test/Lee_Baca_41.jpg \n inflating: /data/test/Lene_Espersen_10.jpg \n inflating: /data/test/Lene_Espersen_11.jpg \n inflating: /data/test/Lene_Espersen_20.jpg \n inflating: /data/test/Lene_Espersen_21.jpg \n inflating: /data/test/Lene_Espersen_40.jpg \n inflating: /data/test/Lene_Espersen_41.jpg \n inflating: /data/test/Lesia_Burlak_00.jpg \n inflating: /data/test/Lesia_Burlak_01.jpg \n inflating: /data/test/Lesia_Burlak_20.jpg \n inflating: /data/test/Lesia_Burlak_21.jpg \n inflating: /data/test/Lesia_Burlak_30.jpg \n inflating: /data/test/Lesia_Burlak_31.jpg \n inflating: /data/test/Lester_Holt_00.jpg \n inflating: /data/test/Lester_Holt_01.jpg \n inflating: /data/test/Lester_Holt_30.jpg \n inflating: /data/test/Lester_Holt_31.jpg \n inflating: /data/test/Lester_Holt_40.jpg \n inflating: /data/test/Lester_Holt_41.jpg \n inflating: /data/test/Leszek_Miller_00.jpg \n inflating: /data/test/Leszek_Miller_01.jpg \n inflating: /data/test/Leszek_Miller_10.jpg \n inflating: /data/test/Leszek_Miller_11.jpg \n inflating: /data/test/Leszek_Miller_30.jpg \n inflating: /data/test/Leszek_Miller_31.jpg \n inflating: /data/test/Leticia_Van_de_Putte_00.jpg \n inflating: /data/test/Leticia_Van_de_Putte_01.jpg \n inflating: /data/test/Leticia_Van_de_Putte_10.jpg \n inflating: /data/test/Leticia_Van_de_Putte_11.jpg \n inflating: /data/test/Leticia_Van_de_Putte_40.jpg \n inflating: /data/test/Leticia_Van_de_Putte_41.jpg \n inflating: /data/test/Leuris_Pupo_00.jpg \n inflating: /data/test/Leuris_Pupo_01.jpg \n inflating: /data/test/Leuris_Pupo_20.jpg \n inflating: /data/test/Leuris_Pupo_21.jpg \n inflating: /data/test/Leuris_Pupo_30.jpg \n inflating: /data/test/Leuris_Pupo_31.jpg \n inflating: /data/test/Leuris_Pupo_40.jpg \n inflating: /data/test/Leuris_Pupo_41.jpg \n inflating: /data/test/Li_Zhaoxing_00.jpg \n inflating: /data/test/Li_Zhaoxing_01.jpg \n inflating: /data/test/Li_Zhaoxing_30.jpg \n inflating: /data/test/Li_Zhaoxing_31.jpg \n inflating: /data/test/Li_Zhaoxing_40.jpg \n inflating: /data/test/Li_Zhaoxing_41.jpg \n inflating: /data/test/Lincoln_Chafee_20.jpg \n inflating: /data/test/Lincoln_Chafee_21.jpg \n inflating: /data/test/Lincoln_Chafee_30.jpg \n inflating: /data/test/Lincoln_Chafee_31.jpg \n inflating: /data/test/Lincoln_Chafee_50.jpg \n inflating: /data/test/Lincoln_Chafee_51.jpg \n inflating: /data/test/Linda_Dano_00.jpg \n inflating: /data/test/Linda_Dano_01.jpg \n inflating: /data/test/Linda_Dano_20.jpg \n inflating: /data/test/Linda_Dano_21.jpg \n inflating: /data/test/Linda_Dano_30.jpg \n inflating: /data/test/Linda_Dano_31.jpg \n inflating: /data/test/Linda_Dano_50.jpg \n inflating: /data/test/Linda_Dano_51.jpg \n inflating: /data/test/Linda_Franklin_00.jpg \n inflating: /data/test/Linda_Franklin_01.jpg \n inflating: /data/test/Linda_Franklin_10.jpg \n inflating: /data/test/Linda_Franklin_11.jpg \n inflating: /data/test/Linda_Franklin_20.jpg \n inflating: /data/test/Linda_Franklin_21.jpg \n inflating: /data/test/Linda_Franklin_40.jpg \n inflating: 
/data/test/Linda_Franklin_41.jpg \n inflating: /data/test/Linda_Sanchez_00.jpg \n inflating: /data/test/Linda_Sanchez_01.jpg \n inflating: /data/test/Linda_Sanchez_10.jpg \n inflating: /data/test/Linda_Sanchez_11.jpg \n inflating: /data/test/Linda_Sanchez_20.jpg \n inflating: /data/test/Linda_Sanchez_21.jpg \n inflating: /data/test/Linda_Sanchez_40.jpg \n inflating: /data/test/Linda_Sanchez_41.jpg \n inflating: /data/test/Lindsey_Graham_00.jpg \n inflating: /data/test/Lindsey_Graham_01.jpg \n inflating: /data/test/Lindsey_Graham_10.jpg \n inflating: /data/test/Lindsey_Graham_11.jpg \n inflating: /data/test/Lindsey_Graham_20.jpg \n inflating: /data/test/Lindsey_Graham_21.jpg \n inflating: /data/test/Lindsey_Graham_30.jpg \n inflating: /data/test/Lindsey_Graham_31.jpg \n inflating: /data/test/Lino_Oviedo_00.jpg \n inflating: /data/test/Lino_Oviedo_01.jpg \n inflating: /data/test/Lino_Oviedo_30.jpg \n inflating: /data/test/Lino_Oviedo_31.jpg \n inflating: /data/test/Lino_Oviedo_50.jpg \n inflating: /data/test/Lino_Oviedo_51.jpg \n inflating: /data/test/Lisa_Ling_00.jpg \n inflating: /data/test/Lisa_Ling_01.jpg \n inflating: /data/test/Lisa_Ling_10.jpg \n inflating: /data/test/Lisa_Ling_11.jpg \n inflating: /data/test/Lisa_Ling_20.jpg \n inflating: /data/test/Lisa_Ling_21.jpg \n inflating: /data/test/Liu_Ye_00.jpg \n inflating: /data/test/Liu_Ye_01.jpg \n inflating: /data/test/Liu_Ye_10.jpg \n inflating: /data/test/Liu_Ye_11.jpg \n inflating: /data/test/Liu_Ye_20.jpg \n inflating: /data/test/Liu_Ye_21.jpg \n inflating: /data/test/Liu_Ye_50.jpg \n inflating: /data/test/Liu_Ye_51.jpg \n inflating: /data/test/Loretta_Lynn_Harper_00.jpg \n inflating: /data/test/Loretta_Lynn_Harper_01.jpg \n inflating: /data/test/Loretta_Lynn_Harper_30.jpg \n inflating: /data/test/Loretta_Lynn_Harper_31.jpg \n inflating: /data/test/Loretta_Lynn_Harper_40.jpg \n inflating: /data/test/Loretta_Lynn_Harper_41.jpg \n inflating: /data/test/Loretta_Lynn_Harper_50.jpg \n inflating: /data/test/Loretta_Lynn_Harper_51.jpg \n inflating: /data/test/Louis_Van_Gaal_00.jpg \n inflating: /data/test/Louis_Van_Gaal_01.jpg \n inflating: /data/test/Louis_Van_Gaal_10.jpg \n inflating: /data/test/Louis_Van_Gaal_11.jpg \n inflating: /data/test/Louis_Van_Gaal_40.jpg \n inflating: /data/test/Louis_Van_Gaal_41.jpg \n inflating: /data/test/Louisa_Baileche_00.jpg \n inflating: /data/test/Louisa_Baileche_01.jpg \n inflating: /data/test/Louisa_Baileche_10.jpg \n inflating: /data/test/Louisa_Baileche_11.jpg \n inflating: /data/test/Louisa_Baileche_20.jpg \n inflating: /data/test/Louisa_Baileche_21.jpg \n inflating: /data/test/Luc_Montagnier_20.jpg \n inflating: /data/test/Luc_Montagnier_21.jpg \n inflating: /data/test/Luc_Montagnier_40.jpg \n inflating: /data/test/Luc_Montagnier_41.jpg \n inflating: /data/test/Luc_Montagnier_50.jpg \n inflating: /data/test/Luc_Montagnier_51.jpg \n inflating: /data/test/Lucia_Kenny_Anthony_00.jpg \n inflating: /data/test/Lucia_Kenny_Anthony_01.jpg \n inflating: /data/test/Lucia_Kenny_Anthony_10.jpg \n inflating: /data/test/Lucia_Kenny_Anthony_11.jpg \n inflating: /data/test/Lucia_Kenny_Anthony_40.jpg \n inflating: /data/test/Lucia_Kenny_Anthony_41.jpg \n inflating: /data/test/Lucia_Kenny_Anthony_50.jpg \n inflating: /data/test/Lucia_Kenny_Anthony_51.jpg \n inflating: /data/test/Lucio_Stanca_00.jpg \n inflating: /data/test/Lucio_Stanca_01.jpg \n inflating: /data/test/Lucio_Stanca_20.jpg \n inflating: /data/test/Lucio_Stanca_21.jpg \n inflating: /data/test/Lucio_Stanca_30.jpg \n inflating: 
/data/test/Lucio_Stanca_31.jpg \n inflating: /data/test/Lucio_Stanca_40.jpg \n inflating: /data/test/Lucio_Stanca_41.jpg \n inflating: /data/test/Luis_Ernesto_Derbez_Bautista_00.jpg \n inflating: /data/test/Luis_Ernesto_Derbez_Bautista_01.jpg \n inflating: /data/test/Luis_Ernesto_Derbez_Bautista_10.jpg \n inflating: /data/test/Luis_Ernesto_Derbez_Bautista_11.jpg \n inflating: /data/test/Luis_Ernesto_Derbez_Bautista_50.jpg \n inflating: /data/test/Luis_Ernesto_Derbez_Bautista_51.jpg \n inflating: /data/test/Luis_Fonsi_20.jpg \n inflating: /data/test/Luis_Fonsi_21.jpg \n inflating: /data/test/Luis_Fonsi_40.jpg \n inflating: /data/test/Luis_Fonsi_41.jpg \n inflating: /data/test/Luis_Fonsi_50.jpg \n inflating: /data/test/Luis_Fonsi_51.jpg \n inflating: /data/test/Lyle_Lovett_20.jpg \n inflating: /data/test/Lyle_Lovett_21.jpg \n inflating: /data/test/Lyle_Lovett_40.jpg \n inflating: /data/test/Lyle_Lovett_41.jpg \n inflating: /data/test/Lyle_Lovett_50.jpg \n inflating: /data/test/Lyle_Lovett_51.jpg \n inflating: /data/test/Mack_Brown_00.jpg \n inflating: /data/test/Mack_Brown_01.jpg \n inflating: /data/test/Mack_Brown_40.jpg \n inflating: /data/test/Mack_Brown_41.jpg \n inflating: /data/test/Mack_Brown_50.jpg \n inflating: /data/test/Mack_Brown_51.jpg \n inflating: /data/test/Maggie_Cheung_00.jpg \n inflating: /data/test/Maggie_Cheung_01.jpg \n inflating: /data/test/Maggie_Cheung_30.jpg \n inflating: /data/test/Maggie_Cheung_31.jpg \n inflating: /data/test/Maggie_Cheung_50.jpg \n inflating: /data/test/Maggie_Cheung_51.jpg \n inflating: /data/test/Maggie_Smith_00.jpg \n inflating: /data/test/Maggie_Smith_01.jpg \n inflating: /data/test/Maggie_Smith_30.jpg \n inflating: /data/test/Maggie_Smith_31.jpg \n inflating: /data/test/Maggie_Smith_40.jpg \n inflating: /data/test/Maggie_Smith_41.jpg \n inflating: /data/test/Mahathir_Mohamad_00.jpg \n inflating: /data/test/Mahathir_Mohamad_01.jpg \n inflating: /data/test/Mahathir_Mohamad_10.jpg \n inflating: /data/test/Mahathir_Mohamad_11.jpg \n inflating: /data/test/Mahathir_Mohamad_20.jpg \n inflating: /data/test/Mahathir_Mohamad_21.jpg \n inflating: /data/test/Mahathir_Mohamad_30.jpg \n inflating: /data/test/Mahathir_Mohamad_31.jpg \n inflating: /data/test/Malcolm_Jamal_Warner_00.jpg \n inflating: /data/test/Malcolm_Jamal_Warner_01.jpg \n inflating: /data/test/Malcolm_Jamal_Warner_10.jpg \n inflating: /data/test/Malcolm_Jamal_Warner_11.jpg \n inflating: /data/test/Malcolm_Jamal_Warner_20.jpg \n inflating: /data/test/Malcolm_Jamal_Warner_21.jpg \n inflating: /data/test/Manuel_Pellegrini_10.jpg \n inflating: /data/test/Manuel_Pellegrini_11.jpg \n inflating: /data/test/Manuel_Pellegrini_20.jpg \n inflating: /data/test/Manuel_Pellegrini_21.jpg \n inflating: /data/test/Manuel_Pellegrini_30.jpg \n inflating: /data/test/Manuel_Pellegrini_31.jpg \n inflating: /data/test/Marc_Anthony_10.jpg \n inflating: /data/test/Marc_Anthony_11.jpg \n inflating: /data/test/Marc_Anthony_20.jpg \n inflating: /data/test/Marc_Anthony_21.jpg \n inflating: /data/test/Marc_Anthony_50.jpg \n inflating: /data/test/Marc_Anthony_51.jpg \n inflating: /data/test/Marc_Racicot_00.jpg \n inflating: /data/test/Marc_Racicot_01.jpg \n inflating: /data/test/Marc_Racicot_20.jpg \n inflating: /data/test/Marc_Racicot_21.jpg \n inflating: /data/test/Marc_Racicot_40.jpg \n inflating: /data/test/Marc_Racicot_41.jpg \n inflating: /data/test/Marc_Racicot_50.jpg \n inflating: /data/test/Marc_Racicot_51.jpg \n inflating: /data/test/Marc_Shaiman_10.jpg \n inflating: /data/test/Marc_Shaiman_11.jpg \n 
inflating: /data/test/Marc_Shaiman_20.jpg \n inflating: /data/test/Marc_Shaiman_21.jpg \n inflating: /data/test/Marc_Shaiman_30.jpg \n inflating: /data/test/Marc_Shaiman_31.jpg \n inflating: /data/test/Margaret_Thatcher_10.jpg \n inflating: /data/test/Margaret_Thatcher_11.jpg \n inflating: /data/test/Margaret_Thatcher_30.jpg \n inflating: /data/test/Margaret_Thatcher_31.jpg \n inflating: /data/test/Margaret_Thatcher_40.jpg \n inflating: /data/test/Margaret_Thatcher_41.jpg \n inflating: /data/test/Margaret_Thatcher_50.jpg \n inflating: /data/test/Margaret_Thatcher_51.jpg \n inflating: /data/test/Maria_Soledad_Alvear_Valenzuela_10.jpg \n inflating: /data/test/Maria_Soledad_Alvear_Valenzuela_11.jpg \n inflating: /data/test/Maria_Soledad_Alvear_Valenzuela_30.jpg \n inflating: /data/test/Maria_Soledad_Alvear_Valenzuela_31.jpg \n inflating: /data/test/Maria_Soledad_Alvear_Valenzuela_40.jpg \n inflating: /data/test/Maria_Soledad_Alvear_Valenzuela_41.jpg \n inflating: /data/test/Mariana_Ohata_00.jpg \n inflating: /data/test/Mariana_Ohata_01.jpg \n inflating: /data/test/Mariana_Ohata_20.jpg \n inflating: /data/test/Mariana_Ohata_21.jpg \n inflating: /data/test/Mariana_Ohata_30.jpg \n inflating: /data/test/Mariana_Ohata_31.jpg \n inflating: /data/test/Marieta_Chrousala_00.jpg \n inflating: /data/test/Marieta_Chrousala_01.jpg \n inflating: /data/test/Marieta_Chrousala_10.jpg \n inflating: /data/test/Marieta_Chrousala_11.jpg \n inflating: /data/test/Marieta_Chrousala_40.jpg \n inflating: /data/test/Marieta_Chrousala_41.jpg \n inflating: /data/test/Marina_Silva_10.jpg \n inflating: /data/test/Marina_Silva_11.jpg \n inflating: /data/test/Marina_Silva_20.jpg \n inflating: /data/test/Marina_Silva_21.jpg \n inflating: /data/test/Marina_Silva_40.jpg \n inflating: /data/test/Marina_Silva_41.jpg \n inflating: /data/test/Marina_Silva_50.jpg \n inflating: /data/test/Marina_Silva_51.jpg \n inflating: /data/test/Mario_Kreutzberger_20.jpg \n inflating: /data/test/Mario_Kreutzberger_21.jpg \n inflating: /data/test/Mario_Kreutzberger_30.jpg \n inflating: /data/test/Mario_Kreutzberger_31.jpg \n inflating: /data/test/Mario_Kreutzberger_40.jpg \n inflating: /data/test/Mario_Kreutzberger_41.jpg \n inflating: /data/test/Marisa_Tomei_10.jpg \n inflating: /data/test/Marisa_Tomei_11.jpg \n inflating: /data/test/Marisa_Tomei_20.jpg \n inflating: /data/test/Marisa_Tomei_21.jpg \n inflating: /data/test/Marisa_Tomei_40.jpg \n inflating: /data/test/Marisa_Tomei_41.jpg \n inflating: /data/test/Marissa_Jaret_Winokur_00.jpg \n inflating: /data/test/Marissa_Jaret_Winokur_01.jpg \n inflating: /data/test/Marissa_Jaret_Winokur_30.jpg \n inflating: /data/test/Marissa_Jaret_Winokur_31.jpg \n inflating: /data/test/Marissa_Jaret_Winokur_40.jpg \n inflating: /data/test/Marissa_Jaret_Winokur_41.jpg \n inflating: /data/test/Mark_Foley_10.jpg \n inflating: /data/test/Mark_Foley_11.jpg \n inflating: /data/test/Mark_Foley_40.jpg \n inflating: /data/test/Mark_Foley_41.jpg \n inflating: /data/test/Mark_Foley_50.jpg \n inflating: /data/test/Mark_Foley_51.jpg \n inflating: /data/test/Mark_Leno_10.jpg \n inflating: /data/test/Mark_Leno_11.jpg \n inflating: /data/test/Mark_Leno_20.jpg \n inflating: /data/test/Mark_Leno_21.jpg \n inflating: /data/test/Mark_Leno_30.jpg \n inflating: /data/test/Mark_Leno_31.jpg \n inflating: /data/test/Martin_Luther_King_III_00.jpg \n inflating: /data/test/Martin_Luther_King_III_01.jpg \n inflating: /data/test/Martin_Luther_King_III_30.jpg \n inflating: /data/test/Martin_Luther_King_III_31.jpg \n inflating: 
/data/test/Martin_Luther_King_III_50.jpg \n inflating: /data/test/Martin_Luther_King_III_51.jpg \n inflating: /data/test/Martin_Sheen_00.jpg \n inflating: /data/test/Martin_Sheen_01.jpg \n inflating: /data/test/Martin_Sheen_30.jpg \n inflating: /data/test/Martin_Sheen_31.jpg \n inflating: /data/test/Martin_Sheen_40.jpg \n inflating: /data/test/Martin_Sheen_41.jpg \n inflating: /data/test/Martin_Sheen_50.jpg \n inflating: /data/test/Martin_Sheen_51.jpg \n inflating: /data/test/Mary_Landrieu_00.jpg \n inflating: /data/test/Mary_Landrieu_01.jpg \n inflating: /data/test/Mary_Landrieu_20.jpg \n inflating: /data/test/Mary_Landrieu_21.jpg \n inflating: /data/test/Mary_Landrieu_30.jpg \n inflating: /data/test/Mary_Landrieu_31.jpg \n inflating: /data/test/Mary_Robinson_10.jpg \n inflating: /data/test/Mary_Robinson_11.jpg \n inflating: /data/test/Mary_Robinson_20.jpg \n inflating: /data/test/Mary_Robinson_21.jpg \n inflating: /data/test/Mary_Robinson_40.jpg \n inflating: /data/test/Mary_Robinson_41.jpg \n inflating: /data/test/Mary_Robinson_50.jpg \n inflating: /data/test/Mary_Robinson_51.jpg \n inflating: /data/test/Massoud_Barzani_00.jpg \n inflating: /data/test/Massoud_Barzani_01.jpg \n inflating: /data/test/Massoud_Barzani_10.jpg \n inflating: /data/test/Massoud_Barzani_11.jpg \n inflating: /data/test/Massoud_Barzani_20.jpg \n inflating: /data/test/Massoud_Barzani_21.jpg \n inflating: /data/test/Massoud_Barzani_40.jpg \n inflating: /data/test/Massoud_Barzani_41.jpg \n inflating: /data/test/Matt_LeBlanc_00.jpg \n inflating: /data/test/Matt_LeBlanc_01.jpg \n inflating: /data/test/Matt_LeBlanc_20.jpg \n inflating: /data/test/Matt_LeBlanc_21.jpg \n inflating: /data/test/Matt_LeBlanc_30.jpg \n inflating: /data/test/Matt_LeBlanc_31.jpg \n inflating: /data/test/Nancy_Kerrigan_00.jpg \n inflating: /data/test/Nancy_Kerrigan_01.jpg \n inflating: /data/test/Nancy_Kerrigan_20.jpg \n inflating: /data/test/Nancy_Kerrigan_21.jpg \n inflating: /data/test/Nancy_Kerrigan_30.jpg \n inflating: /data/test/Nancy_Kerrigan_31.jpg \n inflating: /data/test/Nancy_Kerrigan_40.jpg \n inflating: /data/test/Nancy_Kerrigan_41.jpg \n inflating: /data/test/Nancy_Reagan_00.jpg \n inflating: /data/test/Nancy_Reagan_01.jpg \n inflating: /data/test/Nancy_Reagan_10.jpg \n inflating: /data/test/Nancy_Reagan_11.jpg \n inflating: /data/test/Nancy_Reagan_30.jpg \n inflating: /data/test/Nancy_Reagan_31.jpg \n inflating: /data/test/Nancy_Reagan_40.jpg \n inflating: /data/test/Nancy_Reagan_41.jpg \n inflating: /data/test/Nanni_Moretti_10.jpg \n inflating: /data/test/Nanni_Moretti_11.jpg \n inflating: /data/test/Nanni_Moretti_20.jpg \n inflating: /data/test/Nanni_Moretti_21.jpg \n inflating: /data/test/Nanni_Moretti_40.jpg \n inflating: /data/test/Nanni_Moretti_41.jpg \n inflating: /data/test/Natalia_Vodonova_00.jpg \n inflating: /data/test/Natalia_Vodonova_01.jpg \n inflating: /data/test/Natalia_Vodonova_10.jpg \n inflating: /data/test/Natalia_Vodonova_11.jpg \n inflating: /data/test/Natalia_Vodonova_20.jpg \n inflating: /data/test/Natalia_Vodonova_21.jpg \n inflating: /data/test/Natasha_Lyonne_00.jpg \n inflating: /data/test/Natasha_Lyonne_01.jpg \n inflating: /data/test/Natasha_Lyonne_10.jpg \n inflating: /data/test/Natasha_Lyonne_11.jpg \n inflating: /data/test/Natasha_Lyonne_40.jpg \n inflating: /data/test/Natasha_Lyonne_41.jpg \n inflating: /data/test/Nick_Reilly_10.jpg \n inflating: /data/test/Nick_Reilly_11.jpg \n inflating: /data/test/Nick_Reilly_40.jpg \n inflating: /data/test/Nick_Reilly_41.jpg \n inflating: 
/data/test/Nick_Reilly_50.jpg \n inflating: /data/test/Nick_Reilly_51.jpg \n inflating: /data/test/Nicolas_Eyzaguirre_00.jpg \n inflating: /data/test/Nicolas_Eyzaguirre_01.jpg \n inflating: /data/test/Nicolas_Eyzaguirre_10.jpg \n inflating: /data/test/Nicolas_Eyzaguirre_11.jpg \n inflating: /data/test/Nicolas_Eyzaguirre_20.jpg \n inflating: /data/test/Nicolas_Eyzaguirre_21.jpg \n inflating: /data/test/Nicolas_Sarkozy_00.jpg \n inflating: /data/test/Nicolas_Sarkozy_01.jpg \n inflating: /data/test/Nicolas_Sarkozy_10.jpg \n inflating: /data/test/Nicolas_Sarkozy_11.jpg \n inflating: /data/test/Nicolas_Sarkozy_20.jpg \n inflating: /data/test/Nicolas_Sarkozy_21.jpg \n inflating: /data/test/Nicolas_Sarkozy_50.jpg \n inflating: /data/test/Nicolas_Sarkozy_51.jpg \n inflating: /data/test/Nina_Jacobson_00.jpg \n inflating: /data/test/Nina_Jacobson_01.jpg \n inflating: /data/test/Nina_Jacobson_10.jpg \n inflating: /data/test/Nina_Jacobson_11.jpg \n inflating: /data/test/Nina_Jacobson_30.jpg \n inflating: /data/test/Nina_Jacobson_31.jpg \n inflating: /data/test/Norah_Jones_10.jpg \n inflating: /data/test/Norah_Jones_11.jpg \n inflating: /data/test/Norah_Jones_20.jpg \n inflating: /data/test/Norah_Jones_21.jpg \n inflating: /data/test/Norah_Jones_40.jpg \n inflating: /data/test/Norah_Jones_41.jpg \n inflating: /data/test/Norah_Jones_50.jpg \n inflating: /data/test/Norah_Jones_51.jpg \n inflating: /data/test/Norman_Mineta_00.jpg \n inflating: /data/test/Norman_Mineta_01.jpg \n inflating: /data/test/Norman_Mineta_30.jpg \n inflating: /data/test/Norman_Mineta_31.jpg \n inflating: /data/test/Norman_Mineta_50.jpg \n inflating: /data/test/Norman_Mineta_51.jpg \n inflating: /data/test/Olene_Walker_00.jpg \n inflating: /data/test/Olene_Walker_01.jpg \n inflating: /data/test/Olene_Walker_10.jpg \n inflating: /data/test/Olene_Walker_11.jpg \n inflating: /data/test/Olene_Walker_30.jpg \n inflating: /data/test/Olene_Walker_31.jpg \n inflating: /data/test/Olene_Walker_40.jpg \n inflating: /data/test/Olene_Walker_41.jpg \n inflating: /data/test/Olivia_Newton-John_00.jpg \n inflating: /data/test/Olivia_Newton-John_01.jpg \n inflating: /data/test/Olivia_Newton-John_10.jpg \n inflating: /data/test/Olivia_Newton-John_11.jpg \n inflating: /data/test/Olivia_Newton-John_40.jpg \n inflating: /data/test/Olivia_Newton-John_41.jpg \n inflating: /data/test/Orlando_Bloom_00.jpg \n inflating: /data/test/Orlando_Bloom_01.jpg \n inflating: /data/test/Orlando_Bloom_30.jpg \n inflating: /data/test/Orlando_Bloom_31.jpg \n inflating: /data/test/Orlando_Bloom_40.jpg \n inflating: /data/test/Orlando_Bloom_41.jpg \n inflating: /data/test/Orlando_Bloom_50.jpg \n inflating: /data/test/Orlando_Bloom_51.jpg \n inflating: /data/test/Otto_Reich_00.jpg \n inflating: /data/test/Otto_Reich_01.jpg \n inflating: /data/test/Otto_Reich_10.jpg \n inflating: /data/test/Otto_Reich_11.jpg \n inflating: /data/test/Otto_Reich_30.jpg \n inflating: /data/test/Otto_Reich_31.jpg \n inflating: /data/test/Otto_Reich_40.jpg \n inflating: /data/test/Otto_Reich_41.jpg \n inflating: /data/test/Pat_Riley_00.jpg \n inflating: /data/test/Pat_Riley_01.jpg \n inflating: /data/test/Pat_Riley_20.jpg \n inflating: /data/test/Pat_Riley_21.jpg \n inflating: /data/test/Pat_Riley_50.jpg \n inflating: /data/test/Pat_Riley_51.jpg \n inflating: /data/test/Patrick_Leahy_10.jpg \n inflating: /data/test/Patrick_Leahy_11.jpg \n inflating: /data/test/Patrick_Leahy_20.jpg \n inflating: /data/test/Patrick_Leahy_21.jpg \n inflating: /data/test/Patrick_Leahy_30.jpg \n inflating: 
/data/test/Patrick_Leahy_31.jpg \n inflating: /data/test/Paul_Otellini_00.jpg \n inflating: /data/test/Paul_Otellini_01.jpg \n inflating: /data/test/Paul_Otellini_10.jpg \n inflating: /data/test/Paul_Otellini_11.jpg \n inflating: /data/test/Paul_Otellini_20.jpg \n inflating: /data/test/Paul_Otellini_21.jpg \n inflating: /data/test/Paul_Reiser_00.jpg \n inflating: /data/test/Paul_Reiser_01.jpg \n inflating: /data/test/Paul_Reiser_20.jpg \n inflating: /data/test/Paul_Reiser_21.jpg \n inflating: /data/test/Paul_Reiser_30.jpg \n inflating: /data/test/Paul_Reiser_31.jpg \n inflating: /data/test/Pedro_Solbes_00.jpg \n inflating: /data/test/Pedro_Solbes_01.jpg \n inflating: /data/test/Pedro_Solbes_20.jpg \n inflating: /data/test/Pedro_Solbes_21.jpg \n inflating: /data/test/Pedro_Solbes_30.jpg \n inflating: /data/test/Pedro_Solbes_31.jpg \n inflating: /data/test/Penelope_Ann_Miller_00.jpg \n inflating: /data/test/Penelope_Ann_Miller_01.jpg \n inflating: /data/test/Penelope_Ann_Miller_20.jpg \n inflating: /data/test/Penelope_Ann_Miller_21.jpg \n inflating: /data/test/Penelope_Ann_Miller_50.jpg \n inflating: /data/test/Penelope_Ann_Miller_51.jpg \n inflating: /data/test/Peter_Goldmark_10.jpg \n inflating: /data/test/Peter_Goldmark_11.jpg \n inflating: /data/test/Peter_Goldmark_40.jpg \n inflating: /data/test/Peter_Goldmark_41.jpg \n inflating: /data/test/Peter_Goldmark_50.jpg \n inflating: /data/test/Peter_Goldmark_51.jpg \n inflating: /data/test/Peter_Medgyessy_10.jpg \n inflating: /data/test/Peter_Medgyessy_11.jpg \n inflating: /data/test/Peter_Medgyessy_30.jpg \n inflating: /data/test/Peter_Medgyessy_31.jpg \n inflating: /data/test/Peter_Medgyessy_40.jpg \n inflating: /data/test/Peter_Medgyessy_41.jpg \n inflating: /data/test/Peter_Medgyessy_50.jpg \n inflating: /data/test/Peter_Medgyessy_51.jpg \n inflating: /data/test/Philippe_Gagnon_00.jpg \n inflating: /data/test/Philippe_Gagnon_01.jpg \n inflating: /data/test/Philippe_Gagnon_10.jpg \n inflating: /data/test/Philippe_Gagnon_11.jpg \n inflating: /data/test/Philippe_Gagnon_20.jpg \n inflating: /data/test/Philippe_Gagnon_21.jpg \n inflating: /data/test/Philippe_Gagnon_30.jpg \n inflating: /data/test/Philippe_Gagnon_31.jpg \n inflating: /data/test/Philippe_Noiret_10.jpg \n inflating: /data/test/Philippe_Noiret_11.jpg \n inflating: /data/test/Philippe_Noiret_30.jpg \n inflating: /data/test/Philippe_Noiret_31.jpg \n inflating: /data/test/Philippe_Noiret_50.jpg \n inflating: /data/test/Philippe_Noiret_51.jpg \n inflating: /data/test/Picabo_Street_00.jpg \n inflating: /data/test/Picabo_Street_01.jpg \n inflating: /data/test/Picabo_Street_20.jpg \n inflating: /data/test/Picabo_Street_21.jpg \n inflating: /data/test/Picabo_Street_40.jpg \n inflating: /data/test/Picabo_Street_41.jpg \n inflating: /data/test/Pilar_Montenegro_10.jpg \n inflating: /data/test/Pilar_Montenegro_11.jpg \n inflating: /data/test/Pilar_Montenegro_20.jpg \n inflating: /data/test/Pilar_Montenegro_21.jpg \n inflating: /data/test/Pilar_Montenegro_50.jpg \n inflating: /data/test/Pilar_Montenegro_51.jpg \n inflating: /data/test/Piotr_Anderszewski_20.jpg \n inflating: /data/test/Piotr_Anderszewski_21.jpg \n inflating: /data/test/Piotr_Anderszewski_30.jpg \n inflating: /data/test/Piotr_Anderszewski_31.jpg \n inflating: /data/test/Piotr_Anderszewski_50.jpg \n inflating: /data/test/Piotr_Anderszewski_51.jpg \n inflating: /data/test/Poala_Suarez_30.jpg \n inflating: /data/test/Poala_Suarez_31.jpg \n inflating: /data/test/Poala_Suarez_40.jpg \n inflating: /data/test/Poala_Suarez_41.jpg \n 
inflating: /data/test/Poala_Suarez_50.jpg \n inflating: /data/test/Poala_Suarez_51.jpg \n inflating: /data/test/Prince_Harry_10.jpg \n inflating: /data/test/Prince_Harry_11.jpg \n inflating: /data/test/Prince_Harry_20.jpg \n inflating: /data/test/Prince_Harry_21.jpg \n inflating: /data/test/Prince_Harry_40.jpg \n inflating: /data/test/Prince_Harry_41.jpg \n inflating: /data/test/Princess_Stephanie_00.jpg \n inflating: /data/test/Princess_Stephanie_01.jpg \n inflating: /data/test/Princess_Stephanie_20.jpg \n inflating: /data/test/Princess_Stephanie_21.jpg \n inflating: /data/test/Princess_Stephanie_40.jpg \n inflating: /data/test/Princess_Stephanie_41.jpg \n inflating: /data/test/Princess_Stephanie_50.jpg \n inflating: /data/test/Princess_Stephanie_51.jpg \n inflating: /data/test/Priyanka_Chopra_10.jpg \n inflating: /data/test/Priyanka_Chopra_11.jpg \n inflating: /data/test/Priyanka_Chopra_40.jpg \n inflating: /data/test/Priyanka_Chopra_41.jpg \n inflating: /data/test/Priyanka_Chopra_50.jpg \n inflating: /data/test/Priyanka_Chopra_51.jpg \n inflating: /data/test/Queen_Noor_10.jpg \n inflating: /data/test/Queen_Noor_11.jpg \n inflating: /data/test/Queen_Noor_30.jpg \n inflating: /data/test/Queen_Noor_31.jpg \n inflating: /data/test/Queen_Noor_50.jpg \n inflating: /data/test/Queen_Noor_51.jpg \n inflating: /data/test/Queen_Rania_10.jpg \n inflating: /data/test/Queen_Rania_11.jpg \n inflating: /data/test/Queen_Rania_30.jpg \n inflating: /data/test/Queen_Rania_31.jpg \n inflating: /data/test/Queen_Rania_50.jpg \n inflating: /data/test/Queen_Rania_51.jpg \n inflating: /data/test/Rachel_Hunter_30.jpg \n inflating: /data/test/Rachel_Hunter_31.jpg \n inflating: /data/test/Rachel_Hunter_40.jpg \n inflating: /data/test/Rachel_Hunter_41.jpg \n inflating: /data/test/Rachel_Hunter_50.jpg \n inflating: /data/test/Rachel_Hunter_51.jpg \n inflating: /data/test/Raja_Zafar-ul-Haq_00.jpg \n inflating: /data/test/Raja_Zafar-ul-Haq_01.jpg \n inflating: /data/test/Raja_Zafar-ul-Haq_10.jpg \n inflating: /data/test/Raja_Zafar-ul-Haq_11.jpg \n inflating: /data/test/Raja_Zafar-ul-Haq_20.jpg \n inflating: /data/test/Raja_Zafar-ul-Haq_21.jpg \n inflating: /data/test/Raja_Zafar-ul-Haq_30.jpg \n inflating: /data/test/Raja_Zafar-ul-Haq_31.jpg \n inflating: /data/test/Ralph_Klein_00.jpg \n inflating: /data/test/Ralph_Klein_01.jpg \n inflating: /data/test/Ralph_Klein_10.jpg \n inflating: /data/test/Ralph_Klein_11.jpg \n inflating: /data/test/Ralph_Klein_30.jpg \n inflating: /data/test/Ralph_Klein_31.jpg \n inflating: /data/test/Raza_Rabbani_20.jpg \n inflating: /data/test/Raza_Rabbani_21.jpg \n inflating: /data/test/Raza_Rabbani_30.jpg \n inflating: /data/test/Raza_Rabbani_31.jpg \n inflating: /data/test/Raza_Rabbani_50.jpg \n inflating: /data/test/Raza_Rabbani_51.jpg \n inflating: /data/test/Recep_Tayyip_Erdogan_00.jpg \n inflating: /data/test/Recep_Tayyip_Erdogan_01.jpg \n inflating: /data/test/Recep_Tayyip_Erdogan_20.jpg \n inflating: /data/test/Recep_Tayyip_Erdogan_21.jpg \n inflating: /data/test/Recep_Tayyip_Erdogan_40.jpg \n inflating: /data/test/Recep_Tayyip_Erdogan_41.jpg \n inflating: /data/test/Reese_Witherspoon_00.jpg \n inflating: /data/test/Reese_Witherspoon_01.jpg \n inflating: /data/test/Reese_Witherspoon_10.jpg \n inflating: /data/test/Reese_Witherspoon_11.jpg \n inflating: /data/test/Reese_Witherspoon_40.jpg \n inflating: /data/test/Reese_Witherspoon_41.jpg \n inflating: /data/test/Ricardo_Lopez_Murphy_10.jpg \n inflating: /data/test/Ricardo_Lopez_Murphy_11.jpg \n inflating: 
[unzip output truncated: inflating several hundred face-crop images named <Name>_<NM>.jpg into /data/test/ and /data/training/, plus /data/test_frames_keypoints.csv]
inflating: /data/training/Ellen_DeGeneres_11.jpg \n inflating: /data/training/Ellen_DeGeneres_12.jpg \n inflating: /data/training/Ellen_DeGeneres_40.jpg \n inflating: /data/training/Ellen_DeGeneres_41.jpg \n inflating: /data/training/Ellen_DeGeneres_42.jpg \n inflating: /data/training/Ellen_DeGeneres_50.jpg \n inflating: /data/training/Ellen_DeGeneres_51.jpg \n inflating: /data/training/Ellen_DeGeneres_52.jpg \n inflating: /data/training/Elmar_Brok_00.jpg \n inflating: /data/training/Elmar_Brok_01.jpg \n inflating: /data/training/Elmar_Brok_02.jpg \n inflating: /data/training/Elmar_Brok_20.jpg \n inflating: /data/training/Elmar_Brok_21.jpg \n inflating: /data/training/Elmar_Brok_22.jpg \n inflating: /data/training/Elmar_Brok_30.jpg \n inflating: /data/training/Elmar_Brok_31.jpg \n inflating: /data/training/Elmar_Brok_32.jpg \n inflating: /data/training/Elsa_Zylberstein_00.jpg \n inflating: /data/training/Elsa_Zylberstein_01.jpg \n inflating: /data/training/Elsa_Zylberstein_02.jpg \n inflating: /data/training/Elsa_Zylberstein_10.jpg \n inflating: /data/training/Elsa_Zylberstein_11.jpg \n inflating: /data/training/Elsa_Zylberstein_12.jpg \n inflating: /data/training/Elsa_Zylberstein_40.jpg \n inflating: /data/training/Elsa_Zylberstein_41.jpg \n inflating: /data/training/Elsa_Zylberstein_42.jpg \n inflating: /data/training/Elton_John_10.jpg \n inflating: /data/training/Elton_John_11.jpg \n inflating: /data/training/Elton_John_12.jpg \n inflating: /data/training/Elton_John_20.jpg \n inflating: /data/training/Elton_John_21.jpg \n inflating: /data/training/Elton_John_22.jpg \n inflating: /data/training/Elton_John_30.jpg \n inflating: /data/training/Elton_John_31.jpg \n inflating: /data/training/Elton_John_32.jpg \n inflating: /data/training/Elton_John_40.jpg \n inflating: /data/training/Elton_John_41.jpg \n inflating: /data/training/Elton_John_42.jpg \n inflating: /data/training/Emile_Lahoud_00.jpg \n inflating: /data/training/Emile_Lahoud_01.jpg \n inflating: /data/training/Emile_Lahoud_02.jpg \n inflating: /data/training/Emile_Lahoud_30.jpg \n inflating: /data/training/Emile_Lahoud_31.jpg \n inflating: /data/training/Emile_Lahoud_32.jpg \n inflating: /data/training/Emile_Lahoud_40.jpg \n inflating: /data/training/Emile_Lahoud_41.jpg \n inflating: /data/training/Emile_Lahoud_42.jpg \n inflating: /data/training/Emilio_Botin_00.jpg \n inflating: /data/training/Emilio_Botin_01.jpg \n inflating: /data/training/Emilio_Botin_02.jpg \n inflating: /data/training/Emilio_Botin_10.jpg \n inflating: /data/training/Emilio_Botin_11.jpg \n inflating: /data/training/Emilio_Botin_12.jpg \n inflating: /data/training/Emilio_Botin_20.jpg \n inflating: /data/training/Emilio_Botin_21.jpg \n inflating: /data/training/Emilio_Botin_22.jpg \n inflating: /data/training/Emilio_Botin_40.jpg \n inflating: /data/training/Emilio_Botin_41.jpg \n inflating: /data/training/Emilio_Botin_42.jpg \n inflating: /data/training/Emma_Nicholson_10.jpg \n inflating: /data/training/Emma_Nicholson_11.jpg \n inflating: /data/training/Emma_Nicholson_12.jpg \n inflating: /data/training/Emma_Nicholson_20.jpg \n inflating: /data/training/Emma_Nicholson_21.jpg \n inflating: /data/training/Emma_Nicholson_22.jpg \n inflating: /data/training/Emma_Nicholson_30.jpg \n inflating: /data/training/Emma_Nicholson_31.jpg \n inflating: /data/training/Emma_Nicholson_32.jpg \n inflating: /data/training/Emma_Thompson_20.jpg \n inflating: /data/training/Emma_Thompson_21.jpg \n inflating: /data/training/Emma_Thompson_22.jpg \n inflating: 
/data/training/Emma_Thompson_30.jpg \n inflating: /data/training/Emma_Thompson_31.jpg \n inflating: /data/training/Emma_Thompson_32.jpg \n inflating: /data/training/Emma_Thompson_40.jpg \n inflating: /data/training/Emma_Thompson_41.jpg \n inflating: /data/training/Emma_Thompson_42.jpg \n inflating: /data/training/Emma_Thompson_50.jpg \n inflating: /data/training/Emma_Thompson_51.jpg \n inflating: /data/training/Emma_Thompson_52.jpg \n inflating: /data/training/Emmy_Rossum_20.jpg \n inflating: /data/training/Emmy_Rossum_21.jpg \n inflating: /data/training/Emmy_Rossum_22.jpg \n inflating: /data/training/Emmy_Rossum_30.jpg \n inflating: /data/training/Emmy_Rossum_31.jpg \n inflating: /data/training/Emmy_Rossum_32.jpg \n inflating: /data/training/Emmy_Rossum_40.jpg \n inflating: /data/training/Emmy_Rossum_41.jpg \n inflating: /data/training/Emmy_Rossum_42.jpg \n inflating: /data/training/Emmy_Rossum_50.jpg \n inflating: /data/training/Emmy_Rossum_51.jpg \n inflating: /data/training/Emmy_Rossum_52.jpg \n inflating: /data/training/Eric_Benet_00.jpg \n inflating: /data/training/Eric_Benet_01.jpg \n inflating: /data/training/Eric_Benet_02.jpg \n inflating: /data/training/Eric_Benet_10.jpg \n inflating: /data/training/Eric_Benet_11.jpg \n inflating: /data/training/Eric_Benet_12.jpg \n inflating: /data/training/Eric_Benet_30.jpg \n inflating: /data/training/Eric_Benet_31.jpg \n inflating: /data/training/Eric_Benet_32.jpg \n inflating: /data/training/Erin_Hershey_Presley_10.jpg \n inflating: /data/training/Erin_Hershey_Presley_11.jpg \n inflating: /data/training/Erin_Hershey_Presley_12.jpg \n inflating: /data/training/Erin_Hershey_Presley_30.jpg \n inflating: /data/training/Erin_Hershey_Presley_31.jpg \n inflating: /data/training/Erin_Hershey_Presley_32.jpg \n inflating: /data/training/Erin_Hershey_Presley_40.jpg \n inflating: /data/training/Erin_Hershey_Presley_41.jpg \n inflating: /data/training/Erin_Hershey_Presley_42.jpg \n inflating: /data/training/Ernest_Hollings_00.jpg \n inflating: /data/training/Ernest_Hollings_01.jpg \n inflating: /data/training/Ernest_Hollings_02.jpg \n inflating: /data/training/Ernest_Hollings_10.jpg \n inflating: /data/training/Ernest_Hollings_11.jpg \n inflating: /data/training/Ernest_Hollings_12.jpg \n inflating: /data/training/Ernest_Hollings_20.jpg \n inflating: /data/training/Ernest_Hollings_21.jpg \n inflating: /data/training/Ernest_Hollings_22.jpg \n inflating: /data/training/Ernesto_Zedillo_10.jpg \n inflating: /data/training/Ernesto_Zedillo_11.jpg \n inflating: /data/training/Ernesto_Zedillo_12.jpg \n inflating: /data/training/Ernesto_Zedillo_20.jpg \n inflating: /data/training/Ernesto_Zedillo_21.jpg \n inflating: /data/training/Ernesto_Zedillo_22.jpg \n inflating: /data/training/Ernesto_Zedillo_30.jpg \n inflating: /data/training/Ernesto_Zedillo_31.jpg \n inflating: /data/training/Ernesto_Zedillo_32.jpg \n inflating: /data/training/Ernesto_Zedillo_40.jpg \n inflating: /data/training/Ernesto_Zedillo_41.jpg \n inflating: /data/training/Ernesto_Zedillo_42.jpg \n inflating: /data/training/Ernie_Grunfeld_20.jpg \n inflating: /data/training/Ernie_Grunfeld_21.jpg \n inflating: /data/training/Ernie_Grunfeld_22.jpg \n inflating: /data/training/Ernie_Grunfeld_30.jpg \n inflating: /data/training/Ernie_Grunfeld_31.jpg \n inflating: /data/training/Ernie_Grunfeld_32.jpg \n inflating: /data/training/Ernie_Grunfeld_40.jpg \n inflating: /data/training/Ernie_Grunfeld_41.jpg \n inflating: /data/training/Ernie_Grunfeld_42.jpg \n inflating: /data/training/Ernie_Grunfeld_50.jpg \n 
inflating: /data/training/Ernie_Grunfeld_51.jpg \n inflating: /data/training/Ernie_Grunfeld_52.jpg \n inflating: /data/training/Estelle_Morris_10.jpg \n inflating: /data/training/Estelle_Morris_11.jpg \n inflating: /data/training/Estelle_Morris_12.jpg \n inflating: /data/training/Estelle_Morris_20.jpg \n inflating: /data/training/Estelle_Morris_21.jpg \n inflating: /data/training/Estelle_Morris_22.jpg \n inflating: /data/training/Estelle_Morris_30.jpg \n inflating: /data/training/Estelle_Morris_31.jpg \n inflating: /data/training/Estelle_Morris_32.jpg \n inflating: /data/training/Ethan_Hawke_00.jpg \n inflating: /data/training/Ethan_Hawke_01.jpg \n inflating: /data/training/Ethan_Hawke_02.jpg \n inflating: /data/training/Ethan_Hawke_10.jpg \n inflating: /data/training/Ethan_Hawke_11.jpg \n inflating: /data/training/Ethan_Hawke_12.jpg \n inflating: /data/training/Ethan_Hawke_30.jpg \n inflating: /data/training/Ethan_Hawke_31.jpg \n inflating: /data/training/Ethan_Hawke_32.jpg \n inflating: /data/training/Ethan_Hawke_40.jpg \n inflating: /data/training/Ethan_Hawke_41.jpg \n inflating: /data/training/Ethan_Hawke_42.jpg \n inflating: /data/training/Eunice_Barber_00.jpg \n inflating: /data/training/Eunice_Barber_01.jpg \n inflating: /data/training/Eunice_Barber_02.jpg \n inflating: /data/training/Eunice_Barber_10.jpg \n inflating: /data/training/Eunice_Barber_11.jpg \n inflating: /data/training/Eunice_Barber_12.jpg \n inflating: /data/training/Eunice_Barber_50.jpg \n inflating: /data/training/Eunice_Barber_51.jpg \n inflating: /data/training/Eunice_Barber_52.jpg \n inflating: /data/training/Fernando_Henrique_Cardoso_00.jpg \n inflating: /data/training/Fernando_Henrique_Cardoso_01.jpg \n inflating: /data/training/Fernando_Henrique_Cardoso_02.jpg \n inflating: /data/training/Fernando_Henrique_Cardoso_20.jpg \n inflating: /data/training/Fernando_Henrique_Cardoso_21.jpg \n inflating: /data/training/Fernando_Henrique_Cardoso_22.jpg \n inflating: /data/training/Fernando_Henrique_Cardoso_30.jpg \n inflating: /data/training/Fernando_Henrique_Cardoso_31.jpg \n inflating: /data/training/Fernando_Henrique_Cardoso_32.jpg \n inflating: /data/training/Fernando_Sanz_30.jpg \n inflating: /data/training/Fernando_Sanz_31.jpg \n inflating: /data/training/Fernando_Sanz_32.jpg \n inflating: /data/training/Fernando_Sanz_40.jpg \n inflating: /data/training/Fernando_Sanz_41.jpg \n inflating: /data/training/Fernando_Sanz_42.jpg \n inflating: /data/training/Fernando_Sanz_50.jpg \n inflating: /data/training/Fernando_Sanz_51.jpg \n inflating: /data/training/Fernando_Sanz_52.jpg \n inflating: /data/training/Fidel_Castro_Daiz-Balart_10.jpg \n inflating: /data/training/Fidel_Castro_Daiz-Balart_11.jpg \n inflating: /data/training/Fidel_Castro_Daiz-Balart_12.jpg \n inflating: /data/training/Fidel_Castro_Daiz-Balart_30.jpg \n inflating: /data/training/Fidel_Castro_Daiz-Balart_31.jpg \n inflating: /data/training/Fidel_Castro_Daiz-Balart_32.jpg \n inflating: /data/training/Fidel_Castro_Daiz-Balart_40.jpg \n inflating: /data/training/Fidel_Castro_Daiz-Balart_41.jpg \n inflating: /data/training/Fidel_Castro_Daiz-Balart_42.jpg \n inflating: /data/training/Flavia_Pennetta_30.jpg \n inflating: /data/training/Flavia_Pennetta_31.jpg \n inflating: /data/training/Flavia_Pennetta_32.jpg \n inflating: /data/training/Flavia_Pennetta_40.jpg \n inflating: /data/training/Flavia_Pennetta_41.jpg \n inflating: /data/training/Flavia_Pennetta_42.jpg \n inflating: /data/training/Flavia_Pennetta_50.jpg \n inflating: 
/data/training/Flavia_Pennetta_51.jpg \n inflating: /data/training/Flavia_Pennetta_52.jpg \n inflating: /data/training/Florecita_Cobian_00.jpg \n inflating: /data/training/Florecita_Cobian_01.jpg \n inflating: /data/training/Florecita_Cobian_02.jpg \n inflating: /data/training/Florecita_Cobian_10.jpg \n inflating: /data/training/Florecita_Cobian_11.jpg \n inflating: /data/training/Florecita_Cobian_12.jpg \n inflating: /data/training/Florecita_Cobian_20.jpg \n inflating: /data/training/Florecita_Cobian_21.jpg \n inflating: /data/training/Florecita_Cobian_22.jpg \n inflating: /data/training/Frances_Fisher_20.jpg \n inflating: /data/training/Frances_Fisher_21.jpg \n inflating: /data/training/Frances_Fisher_22.jpg \n inflating: /data/training/Frances_Fisher_30.jpg \n inflating: /data/training/Frances_Fisher_31.jpg \n inflating: /data/training/Frances_Fisher_32.jpg \n inflating: /data/training/Frances_Fisher_40.jpg \n inflating: /data/training/Frances_Fisher_41.jpg \n inflating: /data/training/Frances_Fisher_42.jpg \n inflating: /data/training/Francis_Collins_00.jpg \n inflating: /data/training/Francis_Collins_01.jpg \n inflating: /data/training/Francis_Collins_02.jpg \n inflating: /data/training/Francis_Collins_10.jpg \n inflating: /data/training/Francis_Collins_11.jpg \n inflating: /data/training/Francis_Collins_12.jpg \n inflating: /data/training/Francis_Collins_20.jpg \n inflating: /data/training/Francis_Collins_21.jpg \n inflating: /data/training/Francis_Collins_22.jpg \n inflating: /data/training/Francis_Collins_40.jpg \n inflating: /data/training/Francis_Collins_41.jpg \n inflating: /data/training/Francis_Collins_42.jpg \n inflating: /data/training/Frank_Beamer_00.jpg \n inflating: /data/training/Frank_Beamer_01.jpg \n inflating: /data/training/Frank_Beamer_02.jpg \n inflating: /data/training/Frank_Beamer_20.jpg \n inflating: /data/training/Frank_Beamer_21.jpg \n inflating: /data/training/Frank_Beamer_22.jpg \n inflating: /data/training/Frank_Beamer_30.jpg \n inflating: /data/training/Frank_Beamer_31.jpg \n inflating: /data/training/Frank_Beamer_32.jpg \n inflating: /data/training/Frank_Caliendo_10.jpg \n inflating: /data/training/Frank_Caliendo_11.jpg \n inflating: /data/training/Frank_Caliendo_12.jpg \n inflating: /data/training/Frank_Caliendo_30.jpg \n inflating: /data/training/Frank_Caliendo_31.jpg \n inflating: /data/training/Frank_Caliendo_32.jpg \n inflating: /data/training/Frank_Caliendo_40.jpg \n inflating: /data/training/Frank_Caliendo_41.jpg \n inflating: /data/training/Frank_Caliendo_42.jpg \n inflating: /data/training/Frank_Caliendo_50.jpg \n inflating: /data/training/Frank_Caliendo_51.jpg \n inflating: /data/training/Frank_Caliendo_52.jpg \n inflating: /data/training/Frank_Keating_30.jpg \n inflating: /data/training/Frank_Keating_31.jpg \n inflating: /data/training/Frank_Keating_32.jpg \n inflating: /data/training/Frank_Keating_40.jpg \n inflating: /data/training/Frank_Keating_41.jpg \n inflating: /data/training/Frank_Keating_42.jpg \n inflating: /data/training/Frank_Keating_50.jpg \n inflating: /data/training/Frank_Keating_51.jpg \n inflating: /data/training/Frank_Keating_52.jpg \n inflating: /data/training/Frank_Solich_10.jpg \n inflating: /data/training/Frank_Solich_11.jpg \n inflating: /data/training/Frank_Solich_12.jpg \n inflating: /data/training/Frank_Solich_20.jpg \n inflating: /data/training/Frank_Solich_21.jpg \n inflating: /data/training/Frank_Solich_22.jpg \n inflating: /data/training/Frank_Solich_30.jpg \n inflating: /data/training/Frank_Solich_31.jpg \n 
inflating: /data/training/Frank_Solich_32.jpg \n inflating: /data/training/Franz_Fischler_00.jpg \n inflating: /data/training/Franz_Fischler_01.jpg \n inflating: /data/training/Franz_Fischler_02.jpg \n inflating: /data/training/Franz_Fischler_30.jpg \n inflating: /data/training/Franz_Fischler_31.jpg \n inflating: /data/training/Franz_Fischler_32.jpg \n inflating: /data/training/Franz_Fischler_40.jpg \n inflating: /data/training/Franz_Fischler_41.jpg \n inflating: /data/training/Franz_Fischler_42.jpg \n inflating: /data/training/Franz_Fischler_50.jpg \n inflating: /data/training/Franz_Fischler_51.jpg \n inflating: /data/training/Franz_Fischler_52.jpg \n inflating: /data/training/Gabi_Zimmer_00.jpg \n inflating: /data/training/Gabi_Zimmer_01.jpg \n inflating: /data/training/Gabi_Zimmer_02.jpg \n inflating: /data/training/Gabi_Zimmer_10.jpg \n inflating: /data/training/Gabi_Zimmer_11.jpg \n inflating: /data/training/Gabi_Zimmer_12.jpg \n inflating: /data/training/Gabi_Zimmer_20.jpg \n inflating: /data/training/Gabi_Zimmer_21.jpg \n inflating: /data/training/Gabi_Zimmer_22.jpg \n inflating: /data/training/Gabi_Zimmer_50.jpg \n inflating: /data/training/Gabi_Zimmer_51.jpg \n inflating: /data/training/Gabi_Zimmer_52.jpg \n inflating: /data/training/Gary_Bettman_10.jpg \n inflating: /data/training/Gary_Bettman_11.jpg \n inflating: /data/training/Gary_Bettman_12.jpg \n inflating: /data/training/Gary_Bettman_30.jpg \n inflating: /data/training/Gary_Bettman_31.jpg \n inflating: /data/training/Gary_Bettman_32.jpg \n inflating: /data/training/Gary_Bettman_40.jpg \n inflating: /data/training/Gary_Bettman_41.jpg \n inflating: /data/training/Gary_Bettman_42.jpg \n inflating: /data/training/Gary_Coleman_30.jpg \n inflating: /data/training/Gary_Coleman_31.jpg \n inflating: /data/training/Gary_Coleman_32.jpg \n inflating: /data/training/Gary_Coleman_40.jpg \n inflating: /data/training/Gary_Coleman_41.jpg \n inflating: /data/training/Gary_Coleman_42.jpg \n inflating: /data/training/Gary_Coleman_50.jpg \n inflating: /data/training/Gary_Coleman_51.jpg \n inflating: /data/training/Gary_Coleman_52.jpg \n inflating: /data/training/Gary_Condit_00.jpg \n inflating: /data/training/Gary_Condit_01.jpg \n inflating: /data/training/Gary_Condit_02.jpg \n inflating: /data/training/Gary_Condit_10.jpg \n inflating: /data/training/Gary_Condit_11.jpg \n inflating: /data/training/Gary_Condit_12.jpg \n inflating: /data/training/Gary_Condit_30.jpg \n inflating: /data/training/Gary_Condit_31.jpg \n inflating: /data/training/Gary_Condit_32.jpg \n inflating: /data/training/Gene_Hackman_20.jpg \n inflating: /data/training/Gene_Hackman_21.jpg \n inflating: /data/training/Gene_Hackman_22.jpg \n inflating: /data/training/Gene_Hackman_30.jpg \n inflating: /data/training/Gene_Hackman_31.jpg \n inflating: /data/training/Gene_Hackman_32.jpg \n inflating: /data/training/Gene_Hackman_40.jpg \n inflating: /data/training/Gene_Hackman_41.jpg \n inflating: /data/training/Gene_Hackman_42.jpg \n inflating: /data/training/Geoffrey_Rush_00.jpg \n inflating: /data/training/Geoffrey_Rush_01.jpg \n inflating: /data/training/Geoffrey_Rush_02.jpg \n inflating: /data/training/Geoffrey_Rush_10.jpg \n inflating: /data/training/Geoffrey_Rush_11.jpg \n inflating: /data/training/Geoffrey_Rush_12.jpg \n inflating: /data/training/Geoffrey_Rush_20.jpg \n inflating: /data/training/Geoffrey_Rush_21.jpg \n inflating: /data/training/Geoffrey_Rush_22.jpg \n inflating: /data/training/George_Galloway_00.jpg \n inflating: /data/training/George_Galloway_01.jpg \n 
inflating: /data/training/George_Galloway_02.jpg \n inflating: /data/training/George_Galloway_20.jpg \n inflating: /data/training/George_Galloway_21.jpg \n inflating: /data/training/George_Galloway_22.jpg \n inflating: /data/training/George_Galloway_40.jpg \n inflating: /data/training/George_Galloway_41.jpg \n inflating: /data/training/George_Galloway_42.jpg \n inflating: /data/training/George_Galloway_50.jpg \n inflating: /data/training/George_Galloway_51.jpg \n inflating: /data/training/George_Galloway_52.jpg \n inflating: /data/training/George_Karl_10.jpg \n inflating: /data/training/George_Karl_11.jpg \n inflating: /data/training/George_Karl_12.jpg \n inflating: /data/training/George_Karl_20.jpg \n inflating: /data/training/George_Karl_21.jpg \n inflating: /data/training/George_Karl_22.jpg \n inflating: /data/training/George_Karl_50.jpg \n inflating: /data/training/George_Karl_51.jpg \n inflating: /data/training/George_Karl_52.jpg \n inflating: /data/training/GL_Peiris_00.jpg \n inflating: /data/training/GL_Peiris_01.jpg \n inflating: /data/training/GL_Peiris_02.jpg \n inflating: /data/training/GL_Peiris_10.jpg \n inflating: /data/training/GL_Peiris_11.jpg \n inflating: /data/training/GL_Peiris_12.jpg \n inflating: /data/training/GL_Peiris_30.jpg \n inflating: /data/training/GL_Peiris_31.jpg \n inflating: /data/training/GL_Peiris_32.jpg \n inflating: /data/training/Hanan_Ashrawi_10.jpg \n inflating: /data/training/Hanan_Ashrawi_11.jpg \n inflating: /data/training/Hanan_Ashrawi_12.jpg \n inflating: /data/training/Hanan_Ashrawi_20.jpg \n inflating: /data/training/Hanan_Ashrawi_21.jpg \n inflating: /data/training/Hanan_Ashrawi_22.jpg \n inflating: /data/training/Hanan_Ashrawi_40.jpg \n inflating: /data/training/Hanan_Ashrawi_41.jpg \n inflating: /data/training/Hanan_Ashrawi_42.jpg \n inflating: /data/training/Harrison_Ford_10.jpg \n inflating: /data/training/Harrison_Ford_11.jpg \n inflating: /data/training/Harrison_Ford_12.jpg \n inflating: /data/training/Harrison_Ford_20.jpg \n inflating: /data/training/Harrison_Ford_21.jpg \n inflating: /data/training/Harrison_Ford_22.jpg \n inflating: /data/training/Harrison_Ford_50.jpg \n inflating: /data/training/Harrison_Ford_51.jpg \n inflating: /data/training/Harrison_Ford_52.jpg \n inflating: /data/training/Hassan_Nasrallah_30.jpg \n inflating: /data/training/Hassan_Nasrallah_31.jpg \n inflating: /data/training/Hassan_Nasrallah_32.jpg \n inflating: /data/training/Hassan_Nasrallah_40.jpg \n inflating: /data/training/Hassan_Nasrallah_41.jpg \n inflating: /data/training/Hassan_Nasrallah_42.jpg \n inflating: /data/training/Hassan_Nasrallah_50.jpg \n inflating: /data/training/Hassan_Nasrallah_51.jpg \n inflating: /data/training/Hassan_Nasrallah_52.jpg \n inflating: /data/training/Irene_Kahn_00.jpg \n inflating: /data/training/Irene_Kahn_01.jpg \n inflating: /data/training/Irene_Kahn_02.jpg \n inflating: /data/training/Irene_Kahn_30.jpg \n inflating: /data/training/Irene_Kahn_31.jpg \n inflating: /data/training/Irene_Kahn_32.jpg \n inflating: /data/training/Irene_Kahn_40.jpg \n inflating: /data/training/Irene_Kahn_41.jpg \n inflating: /data/training/Irene_Kahn_42.jpg \n inflating: /data/training/Isabella_Rossellini_00.jpg \n inflating: /data/training/Isabella_Rossellini_01.jpg \n inflating: /data/training/Isabella_Rossellini_02.jpg \n inflating: /data/training/Isabella_Rossellini_10.jpg \n inflating: /data/training/Isabella_Rossellini_11.jpg \n inflating: /data/training/Isabella_Rossellini_12.jpg \n inflating: /data/training/Isabella_Rossellini_20.jpg 
\n inflating: /data/training/Isabella_Rossellini_21.jpg \n inflating: /data/training/Isabella_Rossellini_22.jpg \n inflating: /data/training/Isabelle_Huppert_20.jpg \n inflating: /data/training/Isabelle_Huppert_21.jpg \n inflating: /data/training/Isabelle_Huppert_22.jpg \n inflating: /data/training/Isabelle_Huppert_30.jpg \n inflating: /data/training/Isabelle_Huppert_31.jpg \n inflating: /data/training/Isabelle_Huppert_32.jpg \n inflating: /data/training/Isabelle_Huppert_40.jpg \n inflating: /data/training/Isabelle_Huppert_41.jpg \n inflating: /data/training/Isabelle_Huppert_42.jpg \n inflating: /data/training/Itzhak_Perlman_10.jpg \n inflating: /data/training/Itzhak_Perlman_11.jpg \n inflating: /data/training/Itzhak_Perlman_12.jpg \n inflating: /data/training/Itzhak_Perlman_30.jpg \n inflating: /data/training/Itzhak_Perlman_31.jpg \n inflating: /data/training/Itzhak_Perlman_32.jpg \n inflating: /data/training/Itzhak_Perlman_40.jpg \n inflating: /data/training/Itzhak_Perlman_41.jpg \n inflating: /data/training/Itzhak_Perlman_42.jpg \n inflating: /data/training/Jack_Welch_10.jpg \n inflating: /data/training/Jack_Welch_11.jpg \n inflating: /data/training/Jack_Welch_12.jpg \n inflating: /data/training/Jack_Welch_30.jpg \n inflating: /data/training/Jack_Welch_31.jpg \n inflating: /data/training/Jack_Welch_32.jpg \n inflating: /data/training/Jack_Welch_40.jpg \n inflating: /data/training/Jack_Welch_41.jpg \n inflating: /data/training/Jack_Welch_42.jpg \n inflating: /data/training/Jack_Welch_50.jpg \n inflating: /data/training/Jack_Welch_51.jpg \n inflating: /data/training/Jack_Welch_52.jpg \n inflating: /data/training/Jackie_Sherrill_20.jpg \n inflating: /data/training/Jackie_Sherrill_21.jpg \n inflating: /data/training/Jackie_Sherrill_22.jpg \n inflating: /data/training/Jackie_Sherrill_40.jpg \n inflating: /data/training/Jackie_Sherrill_41.jpg \n inflating: /data/training/Jackie_Sherrill_42.jpg \n inflating: /data/training/Jackie_Sherrill_50.jpg \n inflating: /data/training/Jackie_Sherrill_51.jpg \n inflating: /data/training/Jackie_Sherrill_52.jpg \n inflating: /data/training/Jacqueline_Gold_00.jpg \n inflating: /data/training/Jacqueline_Gold_01.jpg \n inflating: /data/training/Jacqueline_Gold_02.jpg \n inflating: /data/training/Jacqueline_Gold_20.jpg \n inflating: /data/training/Jacqueline_Gold_21.jpg \n inflating: /data/training/Jacqueline_Gold_22.jpg \n inflating: /data/training/Jacqueline_Gold_30.jpg \n inflating: /data/training/Jacqueline_Gold_31.jpg \n inflating: /data/training/Jacqueline_Gold_32.jpg \n inflating: /data/training/Jafar_Umar_Thalib_00.jpg \n inflating: /data/training/Jafar_Umar_Thalib_01.jpg \n inflating: /data/training/Jafar_Umar_Thalib_02.jpg \n inflating: /data/training/Jafar_Umar_Thalib_20.jpg \n inflating: /data/training/Jafar_Umar_Thalib_21.jpg \n inflating: /data/training/Jafar_Umar_Thalib_22.jpg \n inflating: /data/training/Jafar_Umar_Thalib_30.jpg \n inflating: /data/training/Jafar_Umar_Thalib_31.jpg \n inflating: /data/training/Jafar_Umar_Thalib_32.jpg \n inflating: /data/training/Jafar_Umar_Thalib_50.jpg \n inflating: /data/training/Jafar_Umar_Thalib_51.jpg \n inflating: /data/training/Jafar_Umar_Thalib_52.jpg \n inflating: /data/training/Jaime_Pressly_00.jpg \n inflating: /data/training/Jaime_Pressly_01.jpg \n inflating: /data/training/Jaime_Pressly_02.jpg \n inflating: /data/training/Jaime_Pressly_10.jpg \n inflating: /data/training/Jaime_Pressly_11.jpg \n inflating: /data/training/Jaime_Pressly_12.jpg \n inflating: /data/training/Jaime_Pressly_40.jpg \n 
inflating: /data/training/Jaime_Pressly_41.jpg \n inflating: /data/training/Jaime_Pressly_42.jpg \n inflating: /data/training/Jake_Gyllenhaal_00.jpg \n inflating: /data/training/Jake_Gyllenhaal_01.jpg \n inflating: /data/training/Jake_Gyllenhaal_02.jpg \n inflating: /data/training/Jake_Gyllenhaal_40.jpg \n inflating: /data/training/Jake_Gyllenhaal_41.jpg \n inflating: /data/training/Jake_Gyllenhaal_42.jpg \n inflating: /data/training/Jake_Gyllenhaal_50.jpg \n inflating: /data/training/Jake_Gyllenhaal_51.jpg \n inflating: /data/training/Jake_Gyllenhaal_52.jpg \n inflating: /data/training/Jake_Plummer_20.jpg \n inflating: /data/training/Jake_Plummer_21.jpg \n inflating: /data/training/Jake_Plummer_22.jpg \n inflating: /data/training/Jake_Plummer_40.jpg \n inflating: /data/training/Jake_Plummer_41.jpg \n inflating: /data/training/Jake_Plummer_42.jpg \n inflating: /data/training/Jake_Plummer_50.jpg \n inflating: /data/training/Jake_Plummer_51.jpg \n inflating: /data/training/Jake_Plummer_52.jpg \n inflating: /data/training/James_Carville_00.jpg \n inflating: /data/training/James_Carville_01.jpg \n inflating: /data/training/James_Carville_02.jpg \n inflating: /data/training/James_Carville_10.jpg \n inflating: /data/training/James_Carville_11.jpg \n inflating: /data/training/James_Carville_12.jpg \n inflating: /data/training/James_Carville_30.jpg \n inflating: /data/training/James_Carville_31.jpg \n inflating: /data/training/James_Carville_32.jpg \n inflating: /data/training/James_Carville_50.jpg \n inflating: /data/training/James_Carville_51.jpg \n inflating: /data/training/James_Carville_52.jpg \n inflating: /data/training/James_Cunningham_00.jpg \n inflating: /data/training/James_Cunningham_01.jpg \n inflating: /data/training/James_Cunningham_02.jpg \n inflating: /data/training/James_Cunningham_20.jpg \n inflating: /data/training/James_Cunningham_21.jpg \n inflating: /data/training/James_Cunningham_22.jpg \n inflating: /data/training/James_Cunningham_30.jpg \n inflating: /data/training/James_Cunningham_31.jpg \n inflating: /data/training/James_Cunningham_32.jpg \n inflating: /data/training/James_Cunningham_40.jpg \n inflating: /data/training/James_Cunningham_41.jpg \n inflating: /data/training/James_Cunningham_42.jpg \n inflating: /data/training/James_Hoffa_10.jpg \n inflating: /data/training/James_Hoffa_11.jpg \n inflating: /data/training/James_Hoffa_12.jpg \n inflating: /data/training/James_Hoffa_20.jpg \n inflating: /data/training/James_Hoffa_21.jpg \n inflating: /data/training/James_Hoffa_22.jpg \n inflating: /data/training/James_Hoffa_40.jpg \n inflating: /data/training/James_Hoffa_41.jpg \n inflating: /data/training/James_Hoffa_42.jpg \n inflating: /data/training/James_Hoffa_50.jpg \n inflating: /data/training/James_Hoffa_51.jpg \n inflating: /data/training/James_Hoffa_52.jpg \n inflating: /data/training/James_Lockhart_00.jpg \n inflating: /data/training/James_Lockhart_01.jpg \n inflating: /data/training/James_Lockhart_02.jpg \n inflating: /data/training/James_Lockhart_10.jpg \n inflating: /data/training/James_Lockhart_11.jpg \n inflating: /data/training/James_Lockhart_12.jpg \n inflating: /data/training/James_Lockhart_50.jpg \n inflating: /data/training/James_Lockhart_51.jpg \n inflating: /data/training/James_Lockhart_52.jpg \n inflating: /data/training/James_McPherson_00.jpg \n inflating: /data/training/James_McPherson_01.jpg \n inflating: /data/training/James_McPherson_02.jpg \n inflating: /data/training/James_McPherson_10.jpg \n inflating: /data/training/James_McPherson_11.jpg \n 
inflating: /data/training/James_McPherson_12.jpg \n inflating: /data/training/James_McPherson_20.jpg \n inflating: /data/training/James_McPherson_21.jpg \n inflating: /data/training/James_McPherson_22.jpg \n inflating: /data/training/James_Wolfensohn_00.jpg \n inflating: /data/training/James_Wolfensohn_01.jpg \n inflating: /data/training/James_Wolfensohn_02.jpg \n inflating: /data/training/James_Wolfensohn_20.jpg \n inflating: /data/training/James_Wolfensohn_21.jpg \n inflating: /data/training/James_Wolfensohn_22.jpg \n inflating: /data/training/James_Wolfensohn_30.jpg \n inflating: /data/training/James_Wolfensohn_31.jpg \n inflating: /data/training/James_Wolfensohn_32.jpg \n inflating: /data/training/James_Wolfensohn_50.jpg \n inflating: /data/training/James_Wolfensohn_51.jpg \n inflating: /data/training/James_Wolfensohn_52.jpg \n inflating: /data/training/Jan_Peter_Balkenende_00.jpg \n inflating: /data/training/Jan_Peter_Balkenende_01.jpg \n inflating: /data/training/Jan_Peter_Balkenende_02.jpg \n inflating: /data/training/Jan_Peter_Balkenende_10.jpg \n inflating: /data/training/Jan_Peter_Balkenende_11.jpg \n inflating: /data/training/Jan_Peter_Balkenende_12.jpg \n inflating: /data/training/Jan_Peter_Balkenende_30.jpg \n inflating: /data/training/Jan_Peter_Balkenende_31.jpg \n inflating: /data/training/Jan_Peter_Balkenende_32.jpg \n inflating: /data/training/Jan_Peter_Balkenende_50.jpg \n inflating: /data/training/Jan_Peter_Balkenende_51.jpg \n inflating: /data/training/Jan_Peter_Balkenende_52.jpg \n inflating: /data/training/Jane_Krakowski_00.jpg \n inflating: /data/training/Jane_Krakowski_01.jpg \n inflating: /data/training/Jane_Krakowski_02.jpg \n inflating: /data/training/Jane_Krakowski_10.jpg \n inflating: /data/training/Jane_Krakowski_11.jpg \n inflating: /data/training/Jane_Krakowski_12.jpg \n inflating: /data/training/Jane_Krakowski_40.jpg \n inflating: /data/training/Jane_Krakowski_41.jpg \n inflating: /data/training/Jane_Krakowski_42.jpg \n inflating: /data/training/Jane_Krakowski_50.jpg \n inflating: /data/training/Jane_Krakowski_51.jpg \n inflating: /data/training/Jane_Krakowski_52.jpg \n inflating: /data/training/Jane_Pauley_10.jpg \n inflating: /data/training/Jane_Pauley_11.jpg \n inflating: /data/training/Jane_Pauley_12.jpg \n inflating: /data/training/Jane_Pauley_30.jpg \n inflating: /data/training/Jane_Pauley_31.jpg \n inflating: /data/training/Jane_Pauley_32.jpg \n inflating: /data/training/Jane_Pauley_40.jpg \n inflating: /data/training/Jane_Pauley_41.jpg \n inflating: /data/training/Jane_Pauley_42.jpg \n inflating: /data/training/Jane_Rooney_00.jpg \n inflating: /data/training/Jane_Rooney_01.jpg \n inflating: /data/training/Jane_Rooney_02.jpg \n inflating: /data/training/Jane_Rooney_10.jpg \n inflating: /data/training/Jane_Rooney_11.jpg \n inflating: /data/training/Jane_Rooney_12.jpg \n inflating: /data/training/Jane_Rooney_20.jpg \n inflating: /data/training/Jane_Rooney_21.jpg \n inflating: /data/training/Jane_Rooney_22.jpg \n inflating: /data/training/Janis_Ruth_Coulter_00.jpg \n inflating: /data/training/Janis_Ruth_Coulter_01.jpg \n inflating: /data/training/Janis_Ruth_Coulter_02.jpg \n inflating: /data/training/Janis_Ruth_Coulter_20.jpg \n inflating: /data/training/Janis_Ruth_Coulter_21.jpg \n inflating: /data/training/Janis_Ruth_Coulter_22.jpg \n inflating: /data/training/Janis_Ruth_Coulter_40.jpg \n inflating: /data/training/Janis_Ruth_Coulter_41.jpg \n inflating: /data/training/Janis_Ruth_Coulter_42.jpg \n inflating: /data/training/Janis_Ruth_Coulter_50.jpg \n 
inflating: /data/training/Janis_Ruth_Coulter_51.jpg \n inflating: /data/training/Janis_Ruth_Coulter_52.jpg \n inflating: /data/training/JK_Rowling_20.jpg \n inflating: /data/training/JK_Rowling_21.jpg \n inflating: /data/training/JK_Rowling_22.jpg \n inflating: /data/training/JK_Rowling_30.jpg \n inflating: /data/training/JK_Rowling_31.jpg \n inflating: /data/training/JK_Rowling_32.jpg \n inflating: /data/training/JK_Rowling_40.jpg \n inflating: /data/training/JK_Rowling_41.jpg \n inflating: /data/training/JK_Rowling_42.jpg \n inflating: /data/training/JK_Rowling_50.jpg \n inflating: /data/training/JK_Rowling_51.jpg \n inflating: /data/training/JK_Rowling_52.jpg \n inflating: /data/training/Kate_Capshaw_10.jpg \n inflating: /data/training/Kate_Capshaw_11.jpg \n inflating: /data/training/Kate_Capshaw_12.jpg \n inflating: /data/training/Kate_Capshaw_20.jpg \n inflating: /data/training/Kate_Capshaw_21.jpg \n inflating: /data/training/Kate_Capshaw_22.jpg \n inflating: /data/training/Kate_Capshaw_40.jpg \n inflating: /data/training/Kate_Capshaw_41.jpg \n inflating: /data/training/Kate_Capshaw_42.jpg \n inflating: /data/training/Kate_Winslet_00.jpg \n inflating: /data/training/Kate_Winslet_01.jpg \n inflating: /data/training/Kate_Winslet_02.jpg \n inflating: /data/training/Kate_Winslet_10.jpg \n inflating: /data/training/Kate_Winslet_11.jpg \n inflating: /data/training/Kate_Winslet_12.jpg \n inflating: /data/training/Kate_Winslet_50.jpg \n inflating: /data/training/Kate_Winslet_51.jpg \n inflating: /data/training/Kate_Winslet_52.jpg \n inflating: /data/training/Katharine_Hepburn_10.jpg \n inflating: /data/training/Katharine_Hepburn_11.jpg \n inflating: /data/training/Katharine_Hepburn_12.jpg \n inflating: /data/training/Katharine_Hepburn_30.jpg \n inflating: /data/training/Katharine_Hepburn_31.jpg \n inflating: /data/training/Katharine_Hepburn_32.jpg \n inflating: /data/training/Katharine_Hepburn_40.jpg \n inflating: /data/training/Katharine_Hepburn_41.jpg \n inflating: /data/training/Katharine_Hepburn_42.jpg \n inflating: /data/training/Kathryn_Morris_10.jpg \n inflating: /data/training/Kathryn_Morris_11.jpg \n inflating: /data/training/Kathryn_Morris_12.jpg \n inflating: /data/training/Kathryn_Morris_20.jpg \n inflating: /data/training/Kathryn_Morris_21.jpg \n inflating: /data/training/Kathryn_Morris_22.jpg \n inflating: /data/training/Kathryn_Morris_40.jpg \n inflating: /data/training/Kathryn_Morris_41.jpg \n inflating: /data/training/Kathryn_Morris_42.jpg \n inflating: /data/training/Kathryn_Morris_50.jpg \n inflating: /data/training/Kathryn_Morris_51.jpg \n inflating: /data/training/Kathryn_Morris_52.jpg \n inflating: /data/training/Katja_Riemann_00.jpg \n inflating: /data/training/Katja_Riemann_01.jpg \n inflating: /data/training/Katja_Riemann_02.jpg \n inflating: /data/training/Katja_Riemann_10.jpg \n inflating: /data/training/Katja_Riemann_11.jpg \n inflating: /data/training/Katja_Riemann_12.jpg \n inflating: /data/training/Katja_Riemann_20.jpg \n inflating: /data/training/Katja_Riemann_21.jpg \n inflating: /data/training/Katja_Riemann_22.jpg \n inflating: /data/training/Keith_Olbermann_00.jpg \n inflating: /data/training/Keith_Olbermann_01.jpg \n inflating: /data/training/Keith_Olbermann_02.jpg \n inflating: /data/training/Keith_Olbermann_10.jpg \n inflating: /data/training/Keith_Olbermann_11.jpg \n inflating: /data/training/Keith_Olbermann_12.jpg \n inflating: /data/training/Keith_Olbermann_20.jpg \n inflating: /data/training/Keith_Olbermann_21.jpg \n inflating: 
/data/training/Keith_Olbermann_22.jpg \n inflating: /data/training/Keith_Olbermann_50.jpg \n inflating: /data/training/Keith_Olbermann_51.jpg \n inflating: /data/training/Keith_Olbermann_52.jpg \n inflating: /data/training/Keith_Tyson_00.jpg \n inflating: /data/training/Keith_Tyson_01.jpg \n inflating: /data/training/Keith_Tyson_02.jpg \n inflating: /data/training/Keith_Tyson_10.jpg \n inflating: /data/training/Keith_Tyson_11.jpg \n inflating: /data/training/Keith_Tyson_12.jpg \n inflating: /data/training/Keith_Tyson_20.jpg \n inflating: /data/training/Keith_Tyson_21.jpg \n inflating: /data/training/Keith_Tyson_22.jpg \n inflating: /data/training/Kemal_Dervis_00.jpg \n inflating: /data/training/Kemal_Dervis_01.jpg \n inflating: /data/training/Kemal_Dervis_02.jpg \n inflating: /data/training/Kemal_Dervis_10.jpg \n inflating: /data/training/Kemal_Dervis_11.jpg \n inflating: /data/training/Kemal_Dervis_12.jpg \n inflating: /data/training/Kemal_Dervis_30.jpg \n inflating: /data/training/Kemal_Dervis_31.jpg \n inflating: /data/training/Kemal_Dervis_32.jpg \n inflating: /data/training/Kevin_Satterfield_00.jpg \n inflating: /data/training/Kevin_Satterfield_01.jpg \n inflating: /data/training/Kevin_Satterfield_02.jpg \n inflating: /data/training/Kevin_Satterfield_10.jpg \n inflating: /data/training/Kevin_Satterfield_11.jpg \n inflating: /data/training/Kevin_Satterfield_12.jpg \n inflating: /data/training/Kevin_Satterfield_20.jpg \n inflating: /data/training/Kevin_Satterfield_21.jpg \n inflating: /data/training/Kevin_Satterfield_22.jpg \n inflating: /data/training/Kieran_Culkin_00.jpg \n inflating: /data/training/Kieran_Culkin_01.jpg \n inflating: /data/training/Kieran_Culkin_02.jpg \n inflating: /data/training/Kieran_Culkin_10.jpg \n inflating: /data/training/Kieran_Culkin_11.jpg \n inflating: /data/training/Kieran_Culkin_12.jpg \n inflating: /data/training/Kieran_Culkin_20.jpg \n inflating: /data/training/Kieran_Culkin_21.jpg \n inflating: /data/training/Kieran_Culkin_22.jpg \n inflating: /data/training/Kirk_Ferentz_00.jpg \n inflating: /data/training/Kirk_Ferentz_01.jpg \n inflating: /data/training/Kirk_Ferentz_02.jpg \n inflating: /data/training/Kirk_Ferentz_20.jpg \n inflating: /data/training/Kirk_Ferentz_21.jpg \n inflating: /data/training/Kirk_Ferentz_22.jpg \n inflating: /data/training/Kirk_Ferentz_40.jpg \n inflating: /data/training/Kirk_Ferentz_41.jpg \n inflating: /data/training/Kirk_Ferentz_42.jpg \n inflating: /data/training/Kirk_Ferentz_50.jpg \n inflating: /data/training/Kirk_Ferentz_51.jpg \n inflating: /data/training/Kirk_Ferentz_52.jpg \n inflating: /data/training/Kirsten_Dunst_00.jpg \n inflating: /data/training/Kirsten_Dunst_01.jpg \n inflating: /data/training/Kirsten_Dunst_02.jpg \n inflating: /data/training/Kirsten_Dunst_20.jpg \n inflating: /data/training/Kirsten_Dunst_21.jpg \n inflating: /data/training/Kirsten_Dunst_22.jpg \n inflating: /data/training/Kirsten_Dunst_30.jpg \n inflating: /data/training/Kirsten_Dunst_31.jpg \n inflating: /data/training/Kirsten_Dunst_32.jpg \n inflating: /data/training/Kit_Bond_10.jpg \n inflating: /data/training/Kit_Bond_11.jpg \n inflating: /data/training/Kit_Bond_12.jpg \n inflating: /data/training/Kit_Bond_20.jpg \n inflating: /data/training/Kit_Bond_21.jpg \n inflating: /data/training/Kit_Bond_22.jpg \n inflating: /data/training/Kit_Bond_30.jpg \n inflating: /data/training/Kit_Bond_31.jpg \n inflating: /data/training/Kit_Bond_32.jpg \n inflating: /data/training/Kit_Bond_50.jpg \n inflating: /data/training/Kit_Bond_51.jpg \n inflating: 
/data/training/Kit_Bond_52.jpg \n inflating: /data/training/Kristen_Breitweiser_00.jpg \n inflating: /data/training/Kristen_Breitweiser_01.jpg \n inflating: /data/training/Kristen_Breitweiser_02.jpg \n inflating: /data/training/Kristen_Breitweiser_10.jpg \n inflating: /data/training/Kristen_Breitweiser_11.jpg \n inflating: /data/training/Kristen_Breitweiser_12.jpg \n inflating: /data/training/Kristen_Breitweiser_20.jpg \n inflating: /data/training/Kristen_Breitweiser_21.jpg \n inflating: /data/training/Kristen_Breitweiser_22.jpg \n inflating: /data/training/Kristen_Breitweiser_50.jpg \n inflating: /data/training/Kristen_Breitweiser_51.jpg \n inflating: /data/training/Kristen_Breitweiser_52.jpg \n inflating: /data/training/Kristin_Chenoweth_10.jpg \n inflating: /data/training/Kristin_Chenoweth_11.jpg \n inflating: /data/training/Kristin_Chenoweth_12.jpg \n inflating: /data/training/Kristin_Chenoweth_40.jpg \n inflating: /data/training/Kristin_Chenoweth_41.jpg \n inflating: /data/training/Kristin_Chenoweth_42.jpg \n inflating: /data/training/Kristin_Chenoweth_50.jpg \n inflating: /data/training/Kristin_Chenoweth_51.jpg \n inflating: /data/training/Kristin_Chenoweth_52.jpg \n inflating: /data/training/Kristin_Scott_10.jpg \n inflating: /data/training/Kristin_Scott_11.jpg \n inflating: /data/training/Kristin_Scott_12.jpg \n inflating: /data/training/Kristin_Scott_40.jpg \n inflating: /data/training/Kristin_Scott_41.jpg \n inflating: /data/training/Kristin_Scott_42.jpg \n inflating: /data/training/Kristin_Scott_50.jpg \n inflating: /data/training/Kristin_Scott_51.jpg \n inflating: /data/training/Kristin_Scott_52.jpg \n inflating: /data/training/Kristy_Curry_00.jpg \n inflating: /data/training/Kristy_Curry_01.jpg \n inflating: /data/training/Kristy_Curry_02.jpg \n inflating: /data/training/Kristy_Curry_20.jpg \n inflating: /data/training/Kristy_Curry_21.jpg \n inflating: /data/training/Kristy_Curry_22.jpg \n inflating: /data/training/Kristy_Curry_30.jpg \n inflating: /data/training/Kristy_Curry_31.jpg \n inflating: /data/training/Kristy_Curry_32.jpg \n inflating: /data/training/Kurt_Warner_00.jpg \n inflating: /data/training/Kurt_Warner_01.jpg \n inflating: /data/training/Kurt_Warner_02.jpg \n inflating: /data/training/Kurt_Warner_10.jpg \n inflating: /data/training/Kurt_Warner_11.jpg \n inflating: /data/training/Kurt_Warner_12.jpg \n inflating: /data/training/Kurt_Warner_40.jpg \n inflating: /data/training/Kurt_Warner_41.jpg \n inflating: /data/training/Kurt_Warner_42.jpg \n inflating: /data/training/Kweisi_Mfume_00.jpg \n inflating: /data/training/Kweisi_Mfume_01.jpg \n inflating: /data/training/Kweisi_Mfume_02.jpg \n inflating: /data/training/Kweisi_Mfume_10.jpg \n inflating: /data/training/Kweisi_Mfume_11.jpg \n inflating: /data/training/Kweisi_Mfume_12.jpg \n inflating: /data/training/Kweisi_Mfume_40.jpg \n inflating: /data/training/Kweisi_Mfume_41.jpg \n inflating: /data/training/Kweisi_Mfume_42.jpg \n inflating: /data/training/Kweisi_Mfume_50.jpg \n inflating: /data/training/Kweisi_Mfume_51.jpg \n inflating: /data/training/Kweisi_Mfume_52.jpg \n inflating: /data/training/Kyle_Shewfelt_00.jpg \n inflating: /data/training/Kyle_Shewfelt_01.jpg \n inflating: /data/training/Kyle_Shewfelt_02.jpg \n inflating: /data/training/Kyle_Shewfelt_10.jpg \n inflating: /data/training/Kyle_Shewfelt_11.jpg \n inflating: /data/training/Kyle_Shewfelt_12.jpg \n inflating: /data/training/Kyle_Shewfelt_20.jpg \n inflating: /data/training/Kyle_Shewfelt_21.jpg \n inflating: /data/training/Kyle_Shewfelt_22.jpg \n 
inflating: /data/training/Kyle_Shewfelt_40.jpg \n inflating: /data/training/Kyle_Shewfelt_41.jpg \n inflating: /data/training/Kyle_Shewfelt_42.jpg \n inflating: /data/training/Larry_Flynt_00.jpg \n inflating: /data/training/Larry_Flynt_01.jpg \n inflating: /data/training/Larry_Flynt_02.jpg \n inflating: /data/training/Larry_Flynt_10.jpg \n inflating: /data/training/Larry_Flynt_11.jpg \n inflating: /data/training/Larry_Flynt_12.jpg \n inflating: /data/training/Larry_Flynt_20.jpg \n inflating: /data/training/Larry_Flynt_21.jpg \n inflating: /data/training/Larry_Flynt_22.jpg \n inflating: /data/training/Laura_Bozzo_00.jpg \n inflating: /data/training/Laura_Bozzo_01.jpg \n inflating: /data/training/Laura_Bozzo_02.jpg \n inflating: /data/training/Laura_Bozzo_10.jpg \n inflating: /data/training/Laura_Bozzo_11.jpg \n inflating: /data/training/Laura_Bozzo_12.jpg \n inflating: /data/training/Laura_Bozzo_40.jpg \n inflating: /data/training/Laura_Bozzo_41.jpg \n inflating: /data/training/Laura_Bozzo_42.jpg \n inflating: /data/training/Laura_Bush_10.jpg \n inflating: /data/training/Laura_Bush_11.jpg \n inflating: /data/training/Laura_Bush_12.jpg \n inflating: /data/training/Laura_Bush_20.jpg \n inflating: /data/training/Laura_Bush_21.jpg \n inflating: /data/training/Laura_Bush_22.jpg \n inflating: /data/training/Laura_Bush_40.jpg \n inflating: /data/training/Laura_Bush_41.jpg \n inflating: /data/training/Laura_Bush_42.jpg \n inflating: /data/training/Laura_Bush_50.jpg \n inflating: /data/training/Laura_Bush_51.jpg \n inflating: /data/training/Laura_Bush_52.jpg \n inflating: /data/training/Laura_Elena_Harring_00.jpg \n inflating: /data/training/Laura_Elena_Harring_01.jpg \n inflating: /data/training/Laura_Elena_Harring_02.jpg \n inflating: /data/training/Laura_Elena_Harring_20.jpg \n inflating: /data/training/Laura_Elena_Harring_21.jpg \n inflating: /data/training/Laura_Elena_Harring_22.jpg \n inflating: /data/training/Laura_Elena_Harring_40.jpg \n inflating: /data/training/Laura_Elena_Harring_41.jpg \n inflating: /data/training/Laura_Elena_Harring_42.jpg \n inflating: /data/training/Laura_Elena_Harring_50.jpg \n inflating: /data/training/Laura_Elena_Harring_51.jpg \n inflating: /data/training/Laura_Elena_Harring_52.jpg \n inflating: /data/training/Laurence_Fishburne_20.jpg \n inflating: /data/training/Laurence_Fishburne_21.jpg \n inflating: /data/training/Laurence_Fishburne_22.jpg \n inflating: /data/training/Laurence_Fishburne_40.jpg \n inflating: /data/training/Laurence_Fishburne_41.jpg \n inflating: /data/training/Laurence_Fishburne_42.jpg \n inflating: /data/training/Laurence_Fishburne_50.jpg \n inflating: /data/training/Laurence_Fishburne_51.jpg \n inflating: /data/training/Laurence_Fishburne_52.jpg \n inflating: /data/training/Lee_Baca_00.jpg \n inflating: /data/training/Lee_Baca_01.jpg \n inflating: /data/training/Lee_Baca_02.jpg \n inflating: /data/training/Lee_Baca_10.jpg \n inflating: /data/training/Lee_Baca_11.jpg \n inflating: /data/training/Lee_Baca_12.jpg \n inflating: /data/training/Lee_Baca_40.jpg \n inflating: /data/training/Lee_Baca_41.jpg \n inflating: /data/training/Lee_Baca_42.jpg \n inflating: /data/training/Lene_Espersen_10.jpg \n inflating: /data/training/Lene_Espersen_11.jpg \n inflating: /data/training/Lene_Espersen_12.jpg \n inflating: /data/training/Lene_Espersen_20.jpg \n inflating: /data/training/Lene_Espersen_21.jpg \n inflating: /data/training/Lene_Espersen_22.jpg \n inflating: /data/training/Lene_Espersen_40.jpg \n inflating: /data/training/Lene_Espersen_41.jpg \n 
  inflating: /data/training/Lene_Espersen_42.jpg
  inflating: /data/training/Lesia_Burlak_00.jpg
  ... [unzip output truncated: several hundred more training images extracted to /data/training/] ...
  inflating: /data/training/Zhong_Nanshan_51.jpg
  inflating: /data/training/Zhong_Nanshan_52.jpg
  inflating: /data/training_frames_keypoints.csv
]
],
[
[
"<div class=\"alert alert-info\">**Note:** Workspaces automatically close connections after 30 minutes of inactivity (including inactivity while training!). Use the code snippet below to keep your workspace alive during training. (The active_session context manager is imported below.)\n</div>\n```\nfrom workspace_utils import active_session\n\nwith active_session():\n train_model(num_epochs)\n```\n",
"_____no_output_____"
]
],
[
[
"# import the usual resources\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# import utilities to keep workspaces alive during model training\nfrom workspace_utils import active_session\n\n# watch for any changes in model.py, if it changes, re-load it automatically\n%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"## TODO: Define the Net in models.py\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n## TODO: Once you've defined the network, you can instantiate it\n# one example conv layer has been provided for you\nfrom models import Net\n\nnet = Net()\nprint(net)",
"Net(\n (conv1): Conv2d(1, 32, kernel_size=(5, 5), stride=(1, 1))\n (pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (conv2): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1))\n (pool2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (conv3): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1))\n (pool3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (conv4): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1))\n (pool4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (conv5): Conv2d(256, 512, kernel_size=(1, 1), stride=(1, 1))\n (pool5): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (fc1): Linear(in_features=18432, out_features=1024, bias=True)\n (fc2): Linear(in_features=1024, out_features=136, bias=True)\n (drop1): Dropout(p=0.1)\n (drop2): Dropout(p=0.2)\n (drop3): Dropout(p=0.3)\n (drop4): Dropout(p=0.4)\n (drop5): Dropout(p=0.5)\n (drop6): Dropout(p=0.6)\n)\n"
]
],
[
[
"## Transform the dataset \n\nTo prepare for training, create a transformed dataset of images and keypoints.\n\n### TODO: Define a data transform\n\nIn PyTorch, a convolutional neural network expects a torch image of a consistent size as input. For efficient training, and so your model's loss does not blow up during training, it is also suggested that you normalize the input images and keypoints. The necessary transforms have been defined in `data_load.py` and you **do not** need to modify these; take a look at this file (you'll see the same transforms that were defined and applied in Notebook 1).\n\nTo define the data transform below, use a [composition](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html#compose-transforms) of:\n1. Rescaling and/or cropping the data, such that you are left with a square image (the suggested size is 224x224px)\n2. Normalizing the images and keypoints; turning each RGB image into a grayscale image with a color range of [0, 1] and transforming the given keypoints into a range of [-1, 1]\n3. Turning these images and keypoints into Tensors\n\nThese transformations have been defined in `data_load.py`, but it's up to you to call them and create a `data_transform` below. **This transform will be applied to the training data and, later, the test data**. It will change how you go about displaying these images and keypoints, but these steps are essential for efficient training.\n\nAs a note, should you want to perform data augmentation (which is optional in this project), and randomly rotate or shift these images, a square image size will be useful; rotating a 224x224 image by 90 degrees will result in the same shape of output.",
"_____no_output_____"
]
],
[
[
"from torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\n\n# the dataset we created in Notebook 1 is copied in the helper file `data_load.py`\nfrom data_load import FacialKeypointsDataset\n# the transforms we defined in Notebook 1 are in the helper file `data_load.py`\nfrom data_load import Rescale, RandomCrop, Normalize, ToTensor\n\n\n## TODO: define the data_transform using transforms.Compose([all tx's, . , .])\n# order matters! i.e. rescaling should come before a smaller crop\ndata_transform = transforms.Compose([Rescale(250), RandomCrop(224), Normalize(), ToTensor()])\n\n# testing that you've defined a transform\nassert(data_transform is not None), 'Define a data_transform'",
"_____no_output_____"
],
[
"# create the transformed dataset\ntransformed_dataset = FacialKeypointsDataset(csv_file='/data/training_frames_keypoints.csv',\n root_dir='/data/training/',\n transform=data_transform)\n\n\nprint('Number of images: ', len(transformed_dataset))\n\n# iterate through the transformed dataset and print some stats about the first few samples\nfor i in range(4):\n sample = transformed_dataset[i]\n print(i, sample['image'].size(), sample['keypoints'].size())",
"Number of images: 3462\n0 torch.Size([1, 224, 224]) torch.Size([68, 2])\n1 torch.Size([1, 224, 224]) torch.Size([68, 2])\n2 torch.Size([1, 224, 224]) torch.Size([68, 2])\n3 torch.Size([1, 224, 224]) torch.Size([68, 2])\n"
]
],
[
[
"## Batching and loading data\n\nNext, having defined the transformed dataset, we can use PyTorch's DataLoader class to load the training data in batches of whatever size you choose, as well as to shuffle the data for training the model. You can read more about the parameters of the DataLoader in [this documentation](http://pytorch.org/docs/master/data.html).\n\n#### Batch size\nDecide on a good batch size for training your model. Try both small and large batch sizes and note how the loss decreases as the model trains. Too large a batch size may cause your model to crash and/or run out of memory while training.\n\n**Note for Windows users**: Please change the `num_workers` to 0 or you may face some issues with your DataLoader failing.",
"_____no_output_____"
]
],
[
[
"# load training data in batches\nbatch_size = 10\n\ntrain_loader = DataLoader(transformed_dataset, \n batch_size=batch_size,\n shuffle=True, \n num_workers=4)\n",
"_____no_output_____"
]
],
[
[
"## Before training\n\nTake a look at how this model performs before it trains. You should see that the keypoints it predicts start off in one spot and don't match the keypoints on a face at all! It's interesting to visualize this behavior so that you can compare it to the model after training and see how the model has improved.\n\n#### Load in the test dataset\n\nThe test dataset is one that this model has *not* seen before, meaning it has not trained with these images. We'll load in this test data and, both before and after training, see how your model performs on this set!\n\nTo visualize this test data, we have to go through some un-transformation steps to turn our tensors back into displayable images and our keypoints back into a recognizable range. ",
"_____no_output_____"
]
],
[
[
"# load in the test data, using the dataset class\n# AND apply the data_transform you defined above\n\n# create the test dataset\ntest_dataset = FacialKeypointsDataset(csv_file='/data/test_frames_keypoints.csv',\n root_dir='/data/test/',\n transform=data_transform)\n\n",
"_____no_output_____"
],
[
"# load test data in batches\nbatch_size = 10\n\ntest_loader = DataLoader(test_dataset, \n batch_size=batch_size,\n shuffle=True, \n num_workers=4)",
"_____no_output_____"
]
],
[
[
"## Apply the model on a test sample\n\nTo test the model on a test sample of data, you have to follow these steps:\n1. Extract the image and ground truth keypoints from a sample\n2. Wrap the image in a Variable, so that the net can process it as input and track how it changes as the image moves through the network.\n3. Make sure the image is a FloatTensor, which the model expects.\n4. Forward pass the image through the net to get the predicted output keypoints.\n\nThe function below tests how the network performs on the first batch of test data. It returns the test images, the predicted keypoints (produced by the model), and the ground truth keypoints.",
"_____no_output_____"
]
],
[
[
"# test the model on a batch of test images\n\ndef net_sample_output():\n \n # iterate through the test dataset\n for i, sample in enumerate(test_loader):\n \n # get sample data: images and ground truth keypoints\n images = sample['image']\n key_pts = sample['keypoints']\n\n # convert images to FloatTensors\n images = images.type(torch.FloatTensor)\n\n # forward pass to get net output\n output_pts = net(images)\n \n # reshape to batch_size x 68 x 2 pts\n output_pts = output_pts.view(output_pts.size()[0], 68, -1)\n \n # break after first image is tested\n if i == 0:\n return images, output_pts, key_pts\n ",
"_____no_output_____"
]
],
[
[
"#### Debugging tips\n\nIf you get a size or dimension error here, make sure that your network outputs the expected number of keypoints! Or if you get a Tensor type error, look into changing the above code that casts the data into float types: `images = images.type(torch.FloatTensor)`.",
"_____no_output_____"
]
],
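[
[
"If you do hit one of the errors above, a quick way to isolate it is to push a single dummy image through the network and check the output shape before involving the DataLoader. This is a minimal sanity-check sketch (not part of the original project code); it assumes the `Net` printed earlier, which maps a 1x224x224 grayscale input to 136 output values (68 keypoints x 2 coordinates).\n```\n# sketch: verify the network's output shape on one dummy input\ndummy = torch.zeros(1, 1, 224, 224).type(torch.FloatTensor)\nout = net(dummy)\nprint(out.shape)                             # expect torch.Size([1, 136])\nprint(out.view(out.size(0), 68, -1).shape)   # expect torch.Size([1, 68, 2])\n```",
"_____no_output_____"
]
],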
[
[
"# call the above function\n# returns: test images, test predicted keypoints, test ground truth keypoints\ntest_images, test_outputs, gt_pts = net_sample_output()\n\n# print out the dimensions of the data to see if they make sense\nprint(test_images.data.size())\nprint(test_outputs.data.size())\nprint(gt_pts.size())",
"/home/workspace/data_load.py:39: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n key_pts = self.key_pts_frame.iloc[idx, 1:].as_matrix()\n/home/workspace/data_load.py:39: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n key_pts = self.key_pts_frame.iloc[idx, 1:].as_matrix()\n/home/workspace/data_load.py:39: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n key_pts = self.key_pts_frame.iloc[idx, 1:].as_matrix()\n/home/workspace/data_load.py:39: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n key_pts = self.key_pts_frame.iloc[idx, 1:].as_matrix()\n"
]
],
[
[
"## Visualize the predicted keypoints\n\nOnce we've had the model produce some predicted output keypoints, we can visualize these points in a way that's similar to how we've displayed this data before, only this time, we have to \"un-transform\" the image/keypoint data to display it.\n\nNote that I've defined a *new* function, `show_all_keypoints` that displays a grayscale image, its predicted keypoints and its ground truth keypoints (if provided).",
"_____no_output_____"
]
],
[
[
"def show_all_keypoints(image, predicted_key_pts, gt_pts=None):\n \"\"\"Show image with predicted keypoints\"\"\"\n # image is grayscale\n plt.imshow(image, cmap='gray')\n plt.scatter(predicted_key_pts[:, 0], predicted_key_pts[:, 1], s=20, marker='.', c='m')\n # plot ground truth points as green pts\n if gt_pts is not None:\n plt.scatter(gt_pts[:, 0], gt_pts[:, 1], s=20, marker='.', c='g')\n",
"_____no_output_____"
]
],
[
[
"#### Un-transformation\n\nNext, you'll see a helper function, `visualize_output`, that takes in a batch of images, predicted keypoints, and ground truth keypoints and displays a set of those images and their true/predicted keypoints.\n\nThis function's main role is to take batches of image and keypoint data (the input and output of your CNN), and transform them into numpy images and un-normalized keypoints (x, y) for normal display. The un-transformation process turns keypoints and images into numpy arrays from Tensors *and* it undoes the keypoint normalization done in the Normalize() transform; it's assumed that you applied these transformations when you loaded your test data.",
"_____no_output_____"
]
],
[
[
"# visualize the output\n# by default this shows a batch of 10 images\ndef visualize_output(test_images, test_outputs, gt_pts=None, batch_size=10):\n\n    for i in range(batch_size):\n        plt.figure(figsize=(20,10))\n        ax = plt.subplot(1, batch_size, i+1)\n\n        # un-transform the image data\n        image = test_images[i].data   # get the image from its Variable wrapper\n        image = image.numpy()   # convert to numpy array from a Tensor\n        image = np.transpose(image, (1, 2, 0))   # transpose to go from torch to numpy image\n\n        # un-transform the predicted key_pts data\n        predicted_key_pts = test_outputs[i].data\n        predicted_key_pts = predicted_key_pts.numpy()\n        # undo normalization of keypoints  \n        predicted_key_pts = predicted_key_pts*50.0+100\n        \n        # plot ground truth points for comparison, if they exist\n        ground_truth_pts = None\n        if gt_pts is not None:        \n            ground_truth_pts = gt_pts[i]         \n            ground_truth_pts = ground_truth_pts*50.0+100\n        \n        # call show_all_keypoints\n        show_all_keypoints(np.squeeze(image), predicted_key_pts, ground_truth_pts)\n            \n        plt.axis('off')\n\n    plt.show()\n    \n# call it\nvisualize_output(test_images, test_outputs, gt_pts)",
"_____no_output_____"
]
],
[
[
"## Training\n\n#### Loss function\nTraining a network to predict keypoints is different than training a network to predict a class; instead of outputting a distribution of classes and using cross entropy loss, you may want to choose a loss function that is suited for regression, which directly compares a predicted value and target value. Read about the various kinds of loss functions (like MSE or L1/SmoothL1 loss) in [this documentation](http://pytorch.org/docs/master/_modules/torch/nn/modules/loss.html).\n\n### TODO: Define the loss and optimization\n\nNext, you'll define how the model will train by deciding on the loss function and optimizer.\n\n---",
"_____no_output_____"
]
],
[
[
"## TODO: Define the loss and optimization\nimport torch.optim as optim\n\ncriterion = nn.MSELoss()\n\noptimizer = optim.Adam(net.parameters(), lr=0.0001, amsgrad=True, weight_decay=0)\n",
"_____no_output_____"
]
],
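[
[
"As a hedged aside (not part of the original solution): if MSE turns out to be sensitive to outlier keypoints, PyTorch's `SmoothL1Loss` (Huber loss) is a common regression alternative you could swap in for the `criterion` above to compare training behavior.\n```\n# sketch: an alternative regression criterion to compare against MSE\ncriterion = nn.SmoothL1Loss()\n```",
"_____no_output_____"
]
],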
[
[
"## Training and Initial Observation\n\nNow, you'll train on your batched training data from `train_loader` for a number of epochs. \n\nTo quickly observe how your model is training and decide on whether or not you should modify its structure or hyperparameters, you're encouraged to start off with just one or two epochs at first. As you train, note how the model's loss behaves over time: does it decrease quickly at first and then slow down? Does it take a while to decrease in the first place? What happens if you change the batch size of your training data or modify your loss function? etc. \n\nUse these initial observations to make changes to your model and decide on the best architecture before you train for many epochs and create a final model.",
"_____no_output_____"
]
],
[
[
"def train_net(n_epochs):\n\n # prepare the net for training\n net.train()\n\n for epoch in range(n_epochs): # loop over the dataset multiple times\n \n running_loss = 0.0\n\n # train on batches of data, assumes you already have train_loader\n for batch_i, data in enumerate(train_loader):\n # get the input images and their corresponding labels\n images = data['image']\n key_pts = data['keypoints']\n\n # flatten pts\n key_pts = key_pts.view(key_pts.size(0), -1)\n\n # convert variables to floats for regression loss\n key_pts = key_pts.type(torch.FloatTensor)\n images = images.type(torch.FloatTensor)\n\n # forward pass to get outputs\n output_pts = net(images)\n\n # calculate the loss between predicted and target keypoints\n loss = criterion(output_pts, key_pts)\n\n # zero the parameter (weight) gradients\n optimizer.zero_grad()\n \n # backward pass to calculate the weight gradients\n loss.backward()\n\n # update the weights\n optimizer.step()\n\n # print loss statistics\n running_loss += loss.item()\n if batch_i % 10 == 9: # print every 10 batches\n print('Epoch: {}, Batch: {}, Avg. Loss: {}'.format(epoch + 1, batch_i+1, running_loss/10))\n running_loss = 0.0\n\n print('Finished Training')\n",
"_____no_output_____"
],
[
"# train your network\nn_epochs = 5 # start small, and increase when you've decided on your model structure and hyperparams\n\n# this is a Workspaces-specific context manager to keep the connection\n# alive while training your model, not part of pytorch\nwith active_session():\n train_net(n_epochs)",
"/home/workspace/data_load.py:39: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n key_pts = self.key_pts_frame.iloc[idx, 1:].as_matrix()\n/home/workspace/data_load.py:39: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n key_pts = self.key_pts_frame.iloc[idx, 1:].as_matrix()\n/home/workspace/data_load.py:39: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n key_pts = self.key_pts_frame.iloc[idx, 1:].as_matrix()\n/home/workspace/data_load.py:39: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n key_pts = self.key_pts_frame.iloc[idx, 1:].as_matrix()\n"
]
],
[
[
"## Test data\n\nSee how your model performs on previously unseen test data. We've already loaded and transformed this data, similar to the training data. Next, run your trained model on these images to see what kind of keypoints are produced. You should be able to see whether your model fits each new face it sees, whether the points are distributed randomly, or whether the model has overfit the training data and does not generalize.",
"_____no_output_____"
]
],
[
[
"# get a sample of test data again\ntest_images, test_outputs, gt_pts = net_sample_output()\n\nprint(test_images.data.size())\nprint(test_outputs.data.size())\nprint(gt_pts.size())",
"/home/workspace/data_load.py:39: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n key_pts = self.key_pts_frame.iloc[idx, 1:].as_matrix()\n/home/workspace/data_load.py:39: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n key_pts = self.key_pts_frame.iloc[idx, 1:].as_matrix()\n/home/workspace/data_load.py:39: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n key_pts = self.key_pts_frame.iloc[idx, 1:].as_matrix()\n/home/workspace/data_load.py:39: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n key_pts = self.key_pts_frame.iloc[idx, 1:].as_matrix()\n"
],
[
"## TODO: visualize your test output\n# you can use the same function as before, by un-commenting the line below:\n\nvisualize_output(test_images, test_outputs, gt_pts)\n",
"_____no_output_____"
]
],
[
[
"Once you've found a good model (or two), save your model so you can load it and use it later!\n\nSave your models, but please **delete any checkpoints and saved models before you submit your project**; otherwise your workspace may be too large to submit.",
"_____no_output_____"
]
],
[
[
"## TODO: change the name to something unique for each new model\nmodel_dir = 'saved_models/'\nmodel_name = 'keypoints_model_1.pt'\n\n# after training, save your model parameters in the dir 'saved_models'\ntorch.save(net.state_dict(), model_dir+model_name)",
"_____no_output_____"
]
],
[
[
"After you've trained a well-performing model, answer the following questions so that we have some insight into your training and architecture selection process. Answering all questions is required to pass this project.",
"_____no_output_____"
],
[
"### Question 1: What optimization and loss functions did you choose and why?\n",
"_____no_output_____"
],
[
"**Answer**: I used the Adam optimizer and the MSE (Mean Squared Error) loss function. Adam is an adaptive method that computes individual learning rates for different parameters. MSE is the mean of squared distances between the target and predicted values, which makes it a natural fit for a regression task like keypoint prediction.",
"_____no_output_____"
],
[
"### Question 2: What kind of network architecture did you start with and how did it change as you tried different architectures? Did you decide to add more convolutional layers or any layers to avoid overfitting the data?",
"_____no_output_____"
],
[
"**Answer**: I started with a 6-convolution-layer network followed by linear and dropout layers. The loss was high. I then added max-pooling layers between the convolutions, again followed by linear and dropout layers.",
"_____no_output_____"
],
[
"### Question 3: How did you decide on the number of epochs and batch_size to train your model?",
"_____no_output_____"
],
[
"**Answer**: I started with 1 epoch, which ended with an average loss of about 0.318; I then increased to 5 epochs so the model would train further. For the batch size I used 10.",
"_____no_output_____"
],
[
"## Feature Visualization\n\nSometimes, neural networks are thought of as a black box: given some input, they learn to produce some output. CNNs are actually learning to recognize a variety of spatial patterns, and you can visualize what each convolutional layer has been trained to recognize by looking at the weights that make up each convolutional kernel and applying those one at a time to a sample image. This technique is called feature visualization and it's useful for understanding the inner workings of a CNN.",
"_____no_output_____"
],
[
"In the cell below, you can see how to extract a single filter (by index) from your first convolutional layer. The filter should appear as a grayscale grid.",
"_____no_output_____"
]
],
[
[
"# Get the weights in the first conv layer, \"conv1\"\n# if necessary, change this to reflect the name of your first conv layer\nweights1 = net.conv1.weight.data\n\nw = weights1.numpy()\n\nfilter_index = 0\n\nprint(w[filter_index][0])\nprint(w[filter_index][0].shape)\n\n# display the filter weights\nplt.imshow(w[filter_index][0], cmap='gray')\n",
"[[-0.09300843 -0.09894487 -0.0775127 0.11723452 0.1628346 ]\n [ 0.00276445 -0.12837884 0.05001219 0.06232369 0.03360238]\n [-0.04837777 -0.0013669 -0.09743962 -0.16683996 0.06266965]\n [-0.14403643 0.08261782 0.08928844 0.08965611 -0.08103066]\n [-0.00195793 -0.06173778 0.02558543 -0.0502831 0.19364136]]\n(5, 5)\n"
]
],
[
[
"## Feature maps\n\nEach CNN has at least one convolutional layer that is composed of stacked filters (also known as convolutional kernels). As a CNN trains, it learns what weights to include in its convolutional kernels and when these kernels are applied to some input image, they produce a set of **feature maps**. So, feature maps are just sets of filtered images; they are the images produced by applying a convolutional kernel to an input image. These maps show us the features that the different layers of the neural network learn to extract. For example, you might imagine a convolutional kernel that detects the vertical edges of a face or another one that detects the corners of eyes. You can see what kind of features each of these kernels detects by applying them to an image. One such example is shown below; from the way it brings out the lines in the image, you might characterize this as an edge detection filter.\n\n<img src='images/feature_map_ex.png' width=50% height=50%/>\n\n\nNext, choose a test image and filter it with one of the convolutional kernels in your trained CNN; look at the filtered output to get an idea what that particular kernel detects.\n\n### TODO: Filter an image to see the effect of a convolutional kernel\n---",
"_____no_output_____"
]
],
[
[
"## TODO: load in and display any image from the transformed test dataset\n\n## TODO: Using cv's filter2D function,\n## apply a specific set of filter weights (like the one displayed above) to the test image\nimport cv2\nimage = cv2.imread('./images/the_beatles.jpg')\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\nplt.imshow(image)\nplt.xticks([]), plt.yticks([])\nplt.title(\"Original Image\")\n\nfiltered = cv2.filter2D(image, -1, w[filter_index][0])\nfig = plt.figure()\nax = fig.add_subplot(121, xticks = [], yticks = [])\nax.imshow(filtered)\nax.set_title(\"Feature Map\")\nax = fig.add_subplot(122, xticks = [], yticks = [])\nax.imshow(w[filter_index][0], cmap = 'gray')\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Question 4: Choose one filter from your trained CNN and apply it to a test image; what purpose do you think it plays? What kind of feature do you think it detects?\n",
"_____no_output_____"
],
[
"**Answer**: It detects vertical lines and certain features.",
"_____no_output_____"
],
[
"---\n## Moving on!\n\nNow that you've defined and trained your model (and saved the best model), you are ready to move on to the last notebook, which combines a face detector with your saved model to create a facial keypoint detection system that can predict the keypoints on *any* face in an image!",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
4a199561356694fbced8c624f7ad6b31ff240cad
| 16,627 |
ipynb
|
Jupyter Notebook
|
fairness_indicators/examples/Fairness_Indicators_on_TF_Hub_Text_Embeddings.ipynb
|
bogdanmecu/fairness-indicators
|
8b6f0c6146cb1493d914253159739fef545cc0f2
|
[
"Apache-2.0"
] | null | null | null |
fairness_indicators/examples/Fairness_Indicators_on_TF_Hub_Text_Embeddings.ipynb
|
bogdanmecu/fairness-indicators
|
8b6f0c6146cb1493d914253159739fef545cc0f2
|
[
"Apache-2.0"
] | null | null | null |
fairness_indicators/examples/Fairness_Indicators_on_TF_Hub_Text_Embeddings.ipynb
|
bogdanmecu/fairness-indicators
|
8b6f0c6146cb1493d914253159739fef545cc0f2
|
[
"Apache-2.0"
] | null | null | null | 36.542857 | 488 | 0.553618 |
[
[
[
"# Fairness Indicators on TF-Hub Text Embeddings\n\nIn this colab, you will learn how to use [Fairness Indicators](https://github.com/tensorflow/fairness-indicators) to evaluate embeddings from [TF Hub](https://www.tensorflow.org/hub). Fairness Indicators is a suite of tools that facilitates evaluation and visualization of fairness metrics on machine learning models. Fairness Indicators is built on top of [TensorFlow Model Analysis](https://www.tensorflow.org/tfx/guide/tfma), TensorFlow's official model evaluation library.\n",
"_____no_output_____"
],
[
"# Imports",
"_____no_output_____"
]
],
[
[
"!pip install fairness-indicators",
"_____no_output_____"
],
[
"%tensorflow_version 2.x\nimport os\nimport tempfile\nimport apache_beam as beam\nfrom datetime import datetime\nimport tensorflow as tf\nimport tensorflow_hub as hub\nimport tensorflow_model_analysis as tfma\nfrom tensorflow_model_analysis.addons.fairness.view import widget_view\nfrom tensorflow_model_analysis.addons.fairness.post_export_metrics import fairness_indicators\nfrom fairness_indicators import example_model\nfrom fairness_indicators.examples import util",
"_____no_output_____"
]
],
[
[
"# Defining Constants",
"_____no_output_____"
],
[
"TensorFlow parses features from data using [`FixedLenFeature`](https://www.tensorflow.org/api_docs/python/tf/io/FixedLenFeature) and [`VarLenFeature`](https://www.tensorflow.org/api_docs/python/tf/io/VarLenFeature). So to allow TensorFlow to parse our data, we will need to map out our input feature, output feature, and any slicing features that we will want to analyze via Fairness Indicators.",
"_____no_output_____"
]
],
[
[
"BASE_DIR = tempfile.gettempdir()\n\n# The input and output features of the classifier\nTEXT_FEATURE = 'comment_text'\nLABEL = 'toxicity'\n\nFEATURE_MAP = {\n # input and output features\n LABEL: tf.io.FixedLenFeature([], tf.float32),\n TEXT_FEATURE: tf.io.FixedLenFeature([], tf.string),\n\n # slicing features\n 'sexual_orientation': tf.io.VarLenFeature(tf.string),\n 'gender': tf.io.VarLenFeature(tf.string),\n 'religion': tf.io.VarLenFeature(tf.string),\n 'race': tf.io.VarLenFeature(tf.string),\n 'disability': tf.io.VarLenFeature(tf.string)\n}\n\nIDENTITY_TERMS = ['gender', 'sexual_orientation', 'race', 'religion', 'disability']",
"_____no_output_____"
]
],
[
[
"# Data\n\nIn this exercise, we'll work with the [Civil Comments dataset](https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification): approximately 2 million comments made public by the [Civil Comments platform](https://github.com/reaktivstudios/civil-comments) in 2017 for ongoing research. This effort was sponsored by Jigsaw, who have hosted competitions on Kaggle to help classify toxic comments as well as minimize unintended model bias.\n\nEach individual text comment in the dataset has a toxicity label, with the label being 1 if the comment is toxic and 0 if the comment is non-toxic. Within the data, a subset of comments are labeled with a variety of identity attributes, including categories for gender, sexual orientation, religion, and race or ethnicity.\n\nYou can choose to download the original dataset and process it in the colab, which may take a few minutes, or you can download the preprocessed data.",
"_____no_output_____"
]
],
[
[
"download_original_data = True\n\nif download_original_data:\n  train_tf_file = tf.keras.utils.get_file('train_tf.tfrecord',\n                                          'https://storage.googleapis.com/civil_comments_dataset/train_tf.tfrecord')\n  validate_tf_file = tf.keras.utils.get_file('validate_tf.tfrecord',\n                                             'https://storage.googleapis.com/civil_comments_dataset/validate_tf.tfrecord')\n\n  # The identity terms list will be grouped together by their categories\n  # at threshold 0.5. Only the identity term column, text column,\n  # and label column will be kept after processing.\n  train_tf_file = util.convert_comments_data(train_tf_file)\n  validate_tf_file = util.convert_comments_data(validate_tf_file)\n\nelse:\n  train_tf_file = tf.keras.utils.get_file('train_tf_processed.tfrecord',\n                                          'https://storage.googleapis.com/civil_comments_dataset/train_tf_processed.tfrecord')\n  validate_tf_file = tf.keras.utils.get_file('validate_tf_processed.tfrecord',\n                                             'https://storage.googleapis.com/civil_comments_dataset/validate_tf_processed.tfrecord')",
"_____no_output_____"
]
],
[
[
"# Creating a TensorFlow Model Analysis Pipeline\n\nThe Fairness Indicators library operates on [TensorFlow Model Analysis (TFMA) models](https://www.tensorflow.org/tfx/model_analysis/get_started). TFMA models wrap [TensorFlow models](https://www.tensorflow.org/guide/estimator) with additional functionality to evaluate and visualize their results. The actual evaluation occurs inside of an [Apache Beam pipeline](https://beam.apache.org/documentation/programming-guide/).\n\nSo we need to...\n1. Build a TensorFlow model.\n2. Build a TFMA model on top of the TensorFlow model.\n3. Run the model analysis in a Beam pipeline.",
"_____no_output_____"
],
[
"# Putting it all Together",
"_____no_output_____"
]
],
[
[
"def embedding_fairness_result(embedding, identity_term='gender'):\n \n model_dir = os.path.join(BASE_DIR, 'train',\n datetime.now().strftime('%Y%m%d-%H%M%S'))\n\n print(\"Training classifier for \" + embedding)\n classifier = example_model.train_model(model_dir,\n train_tf_file,\n LABEL,\n TEXT_FEATURE,\n FEATURE_MAP,\n embedding)\n\n # We need to create a unique path to store our results for this embedding.\n embedding_name = embedding.split('/')[-2]\n eval_result_path = os.path.join(BASE_DIR, 'eval_result', embedding_name)\n\n example_model.evaluate_model(classifier,\n validate_tf_file,\n eval_result_path,\n identity_term,\n LABEL,\n FEATURE_MAP)\n return tfma.load_eval_result(output_path=eval_result_path)",
"_____no_output_____"
]
],
[
[
"# Run TFMA & Fairness Indicators",
"_____no_output_____"
],
[
"## Fairness Indicators Metrics",
"_____no_output_____"
],
[
"Refer [here](https://github.com/tensorflow/fairness-indicators) for more information on Fairness Indicators. Below are some of the available metrics.\n\n* [Negative Rate, False Negative Rate (FNR), and True Negative Rate (TNR)](https://en.wikipedia.org/wiki/False_positives_and_false_negatives#False_positive_and_false_negative_rates)\n* [Positive Rate, False Positive Rate (FPR), and True Positive Rate (TPR)](https://en.wikipedia.org/wiki/False_positives_and_false_negatives#False_positive_and_false_negative_rates)\n* [Accuracy](https://www.tensorflow.org/api_docs/python/tf/keras/metrics/Accuracy)\n* [Precision and Recall](https://en.wikipedia.org/wiki/Precision_and_recall)\n* [Precision-Recall AUC](https://www.tensorflow.org/api_docs/python/tf/keras/metrics/AUC)\n* [ROC AUC](https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve)",
"_____no_output_____"
],
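[
"To make these rate definitions concrete, here is a small illustrative sketch (not Fairness Indicators API code; the confusion-matrix counts are hypothetical):\n```\n# illustrative only: metric definitions from hypothetical confusion-matrix counts\ntp, fp, tn, fn = 40, 10, 35, 15\ntpr = tp / (tp + fn)    # true positive rate (recall)\nfpr = fp / (fp + tn)    # false positive rate\nfnr = fn / (fn + tp)    # false negative rate\ntnr = tn / (tn + fp)    # true negative rate\naccuracy = (tp + tn) / (tp + fp + tn + fn)\nprecision = tp / (tp + fp)\n```",
"_____no_output_____"
],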
[
"## Text Embeddings",
"_____no_output_____"
],
[
"**[TF-Hub](https://www.tensorflow.org/hub)** provides several **text embeddings**. These embeddings will serve as the feature column for our different models. For this Colab, we use the following embeddings:\n\n* [**random-nnlm-en-dim128**](https://tfhub.dev/google/random-nnlm-en-dim128/1): random text embeddings, this serves as a convenient baseline.\n* [**nnlm-en-dim128**](https://tfhub.dev/google/nnlm-en-dim128/1): a text embedding based on [A Neural Probabilistic Language Model](http://www.jmlr.org/papers/volume3/bengio03a/bengio03a.pdf). \n* [**universal-sentence-encoder**](https://tfhub.dev/google/universal-sentence-encoder/2): a text embedding based on [Universal Sentence Encoder](https://arxiv.org/pdf/1803.11175.pdf).",
"_____no_output_____"
],
[
"## Fairness Indicator Results",
"_____no_output_____"
],
[
"For each of the above embeddings, we will compute fairness indicators with our `embedding_fairness_result` pipeline, and then render the results in the Fairness Indicator UI widget with `widget_view.render_fairness_indicator`.\n\nNote that the `widget_view.render_fairness_indicator` cells may need to be run twice for the visualization to be displayed.",
"_____no_output_____"
],
[
"#### Random NNLM",
"_____no_output_____"
]
],
[
[
"eval_result_random_nnlm = embedding_fairness_result('https://tfhub.dev/google/random-nnlm-en-dim128/1')",
"_____no_output_____"
],
[
"widget_view.render_fairness_indicator(eval_result_random_nnlm)",
"_____no_output_____"
]
],
[
[
"#### NNLM",
"_____no_output_____"
]
],
[
[
"eval_result_nnlm = embedding_fairness_result('https://tfhub.dev/google/nnlm-en-dim128/1')",
"_____no_output_____"
],
[
"widget_view.render_fairness_indicator(eval_result_nnlm)",
"_____no_output_____"
]
],
[
[
"#### Universal Sentence Encoder",
"_____no_output_____"
]
],
[
[
"eval_result_use = embedding_fairness_result('https://tfhub.dev/google/universal-sentence-encoder/2')",
"_____no_output_____"
],
[
"widget_view.render_fairness_indicator(eval_result_use)",
"_____no_output_____"
]
],
[
[
"## Exercises\n1. Pick an identity category, such as religion or sexual orientation, and look at False Positive Rate for the Universal Sentence Encoder. How do different slices compare to each other? How do they compare to the Overall baseline?\n2. Now pick a different identity category. Compare the results of this category with the previous one. Does the model weigh one category as more \"toxic\" than the other? Does this change with the embedding used?\n3. Does the model generally tend to overestimate or underestimate the number of toxic comments?\n4. Look at the graphs for different fairness metrics. Which metrics seem most informative? Which embeddings perform best and worst for that metric?\n\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
4a199ee65dcdba769f95e71d7c02e45371180224
| 102,516 |
ipynb
|
Jupyter Notebook
|
ipynb/SpellingBee.ipynb
|
muattiyah/pytudes
|
3242eccf6a83d6fffe761a57859b3455f2442383
|
[
"MIT"
] | null | null | null |
ipynb/SpellingBee.ipynb
|
muattiyah/pytudes
|
3242eccf6a83d6fffe761a57859b3455f2442383
|
[
"MIT"
] | null | null | null |
ipynb/SpellingBee.ipynb
|
muattiyah/pytudes
|
3242eccf6a83d6fffe761a57859b3455f2442383
|
[
"MIT"
] | null | null | null | 50.675235 | 13,144 | 0.672412 |
[
[
[
"<div style=\"text-align: right\" align=\"right\"><i>Peter Norvig, 3 Jan 2020</i></div>\n\n# Spelling Bee Puzzle\n\nThe [3 Jan. 2020 edition of the 538 Riddler](https://fivethirtyeight.com/features/can-you-solve-the-vexing-vexillology/) concerns the popular NYTimes [Spelling Bee](https://www.nytimes.com/puzzles/spelling-bee) puzzle:\n\n> In this game, seven letters are arranged in a **honeycomb** lattice, with one letter in the center. Here’s the lattice from Dec. 24, 2019:\n> \n> <img src=\"https://fivethirtyeight.com/wp-content/uploads/2020/01/Screen-Shot-2019-12-24-at-5.46.55-PM.png?w=1136\" width=\"150\">\n> \n> The goal is to identify as many words as possible that meet the following criteria:\n> 1. The word must be at least four letters long.\n> 2. The word must include the central letter.\n> 3. The word cannot include any letter beyond the seven given letters.\n>\n>Note that letters can be repeated. For example, the words GAME and AMALGAM are both acceptable words. Four-letter words are worth 1 point each, while five-letter words are worth 5 points, six-letter words are worth 6 points, seven-letter words are worth 7 points, etc. Words that use all of the seven letters in the honeycomb are known as **pangrams** and earn 7 bonus points (in addition to the points for the length of the word). So in the above example, MEGAPLEX is worth 15 points.\n>\n> ***Which seven-letter honeycomb results in the highest possible game score?*** To be a valid choice of seven letters, no letter can be repeated, it must not contain the letter S (that would be too easy) and there must be at least one pangram.\n>\n> For consistency, please use [this word list](https://norvig.com/ngrams/enable1.txt) to check your game score.\n\n\n\nSince the referenced [word list](https://norvig.com/ngrams/enable1.txt) came from *my* web site, I felt somewhat compelled to solve this one. (Note I didn't make up the word list; it is a standard Scrabble word list that I happen to host a copy of.) I'll show you how I address the problem, step by step:",
"_____no_output_____"
],
[
"# Step 1: Words, Word Scores, and Pangrams\n\nLet's start by defining some basics:\n\n- A **valid word** is a string of at least 4 letters, with no 'S', and not more than 7 distinct letters.\n- A **word list** is, well, a list of words.\n- A **pangram** is a word with exactly 7 distinct letters; it scores a **pangram bonus** of 7 points.\n- The **word score** is 1 for a four letter word, or the length of the word for longer words, plus any pangram bonus.\n",
"_____no_output_____"
]
],
[
[
"from typing import List, Set, Tuple, Dict\nfrom collections import Counter, defaultdict, namedtuple\nfrom itertools import combinations\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"Word = str # Type for a word\n\ndef valid(word) -> bool:\n \"\"\"Does word have at least 4 letters, no 'S', and no more than 7 distinct letters?\"\"\"\n return len(word) >= 4 and 'S' not in word and len(set(word)) <= 7\n\ndef valid_words(text, valid=valid) -> List[Word]: \n \"\"\"All the valid words in text.\"\"\"\n return [w for w in text.upper().split() if valid(w)]\n\ndef pangram_bonus(word) -> int: \n \"\"\"Does a word get a bonus for having 7 distinct letters?\"\"\"\n return 7 if len(set(word)) == 7 else 0\n\ndef word_score(word) -> int: \n \"\"\"The points for this word, including bonus for pangram.\"\"\"\n return 1 if len(word) == 4 else len(word) + pangram_bonus(word)",
"_____no_output_____"
]
],
[
[
"I'll make a mini word list to experiment with: ",
"_____no_output_____"
]
],
[
[
"mini = valid_words('game amalgam amalgamation glam gem gems em megaplex cacciatore erotica')\nmini",
"_____no_output_____"
]
],
[
[
"Note that `gem` and `em` are too short, `gems` has an `s` which is not allowed, and `amalgamation` has too many distinct letters (8). We're left with six valid words out of the ten candidate words. Here are examples of the other two functions in action:",
"_____no_output_____"
]
],
[
[
"{w for w in mini if pangram_bonus(w)}",
"_____no_output_____"
],
[
"{w: word_score(w) for w in mini}",
"_____no_output_____"
]
],
[
[
"# Step 2: Honeycombs and Game Scores\n\nIn a honeycomb the order of the letters doesn't matter; all that matters is:\n 1. The seven distinct letters in the honeycomb.\n 2. The one distinguished center letter.\n \nThus, we can represent a honeycomb as follows (I wanted to put in my own less verbose `__repr__` method):\n ",
"_____no_output_____"
]
],
[
[
"class Honeycomb(namedtuple('_', 'letters, center')):\n def __repr__(self): return f'Honeycomb({self.letters!r}, {self.center!r})'\n\nhc = Honeycomb('AEGLMPX', 'G')\nhc",
"_____no_output_____"
]
],
[
[
"The **game score** for a honeycomb is the sum of the word scores for all the words that the honeycomb can make. How do we know if a honeycomb can make a word? It can if (1) the word contains the honeycomb's center and (2) every letter in the word is in the honeycomb. ",
"_____no_output_____"
]
],
[
[
"def game_score(honeycomb, wordlist) -> int:\n \"\"\"The total score for this honeycomb.\"\"\"\n return sum(word_score(w) \n for w in wordlist if can_make(honeycomb, w))\n\ndef can_make(honeycomb, word) -> bool:\n \"\"\"Can the honeycomb make this word?\"\"\"\n letters, center = honeycomb\n return center in word and all(L in letters for L in word)",
"_____no_output_____"
],
[
"game_score(hc, mini)",
"_____no_output_____"
],
[
"{w: word_score(w) for w in mini if can_make(hc, w)}",
"_____no_output_____"
]
],
[
[
"# Step 3: Best Honeycomb\n\n\nHow many possible honeycombs are there? We can put any letter in the center, then any 6 letters around the outside (order doesn't matter); since the letter 'S' is not allowed, this gives a total of 25 × (24 choose 6) = 3,364,900 possible honeycombs. We could conceivably ask for the game score of every one of them and pick the best; that would probably take hours of computation (not seconds, and not days).\n\nHowever, a key constraint of the game is that **there must be at least one pangram** in the set of words that a valid honeycomb can make. That means that a valid honeycomb must ***be*** the set of seven letters in one of the pangram words in the word list, with any of the seven letters as the center. My approach to find the best (highest scoring) honeycomb is:\n\n * Go through all the words and find all the valid honeycombs: the 7-letter pangram letter sets, with any of the 7 letters as center.\n * Compute the game score for each valid honeycomb and return a honeycomb with maximal game score.",
"_____no_output_____"
]
],
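[
[
"(A quick check of that arithmetic, as a sketch; `math.comb` assumes Python 3.8+.)\n```\nfrom math import comb\n25 * comb(24, 6)   # => 3364900: 25 centers x C(24, 6) outer-letter sets\n```",
"_____no_output_____"
]
],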
[
[
"def best_honeycomb(words) -> Honeycomb: \n \"\"\"Return a honeycomb with highest game score on these words.\"\"\"\n return max(valid_honeycombs(words), \n key=lambda h: game_score(h, words))\n\ndef valid_honeycombs(words) -> List[Honeycomb]:\n \"\"\"Valid Honeycombs are the pangram lettersets, with any center.\"\"\"\n pangram_lettersets = {letterset(w) for w in words if pangram_bonus(w)}\n return [Honeycomb(letters, center) \n for letters in pangram_lettersets \n for center in letters]",
"_____no_output_____"
]
],
[
[
"I will represent a **set of letters** as a sorted string of distinct letters. Why not a Python `set` (or `frozenset` if we want it to be the key of a dict)? Because a string takes up less space in memory, and its printed representation is easier to read when debugging. Compare:\n- `frozenset({'A', 'E', 'G', 'L', 'M', 'P', 'X'})`\n- `'AEGLMPX'`\n\nI'll use the name `letterset` for the function that converts a word to a set of letters, and `Letterset` for the resulting type:",
"_____no_output_____"
]
],
[
[
"Letterset = str # Type for a set of letters, like \"AGLM\"\n\ndef letterset(word) -> Letterset:\n \"\"\"The set of letters in a word, represented as a sorted str.\"\"\"\n return ''.join(sorted(set(word)))",
"_____no_output_____"
],
[
"{w: letterset(w) for w in mini}",
"_____no_output_____"
]
],
[
[
"Note that 'AMALGAM' and 'GLAM' have the same letterset, as do 'CACCIATORE' and 'EROTICA'.",
"_____no_output_____"
]
],
[
[
"valid_honeycombs(mini)",
"_____no_output_____"
],
[
"best_honeycomb(mini)",
"_____no_output_____"
]
],
[
[
"**We're done!** We know how to find the best honeycomb. But so far, we've only done it for the mini word list. \n\n# Step 4: The enable1 Word List\n\nHere's the real word list, `enable1.txt`, and some counts derived from it:",
"_____no_output_____"
]
],
[
[
"! [ -e enable1.txt ] || curl -O http://norvig.com/ngrams/enable1.txt\n! wc -w enable1.txt",
" 172820 enable1.txt\r\n"
],
[
"enable1 = valid_words(open('enable1.txt').read())\nlen(enable1)",
"_____no_output_____"
],
[
"pangrams = [w for w in enable1 if pangram_bonus(w)]\nlen(pangrams)",
"_____no_output_____"
],
[
"len({letterset(w) for w in pangrams}) # pangram lettersets",
"_____no_output_____"
],
[
"len(valid_honeycombs(enable1))",
"_____no_output_____"
]
],
[
[
"To summarize, there are:\n\n- 172,820 words in the `enable1` word list\n- 44,585 valid Spelling Bee words\n- 14,741 pangram words \n- 7,986 distinct pangram lettersets\n- 55,902 (7 × 7,986) valid pangram-containing honeycombs\n\nHow long will it take to run `best_honeycomb(enable1)`? Most of the computation time is in `game_score` (which has to look at all 44,585 valid words), so let's estimate the total time by first checking how long it takes to compute the game score of a single honeycomb:",
"_____no_output_____"
]
],
[
[
"%time game_score(hc, enable1)",
"CPU times: user 9.34 ms, sys: 31 µs, total: 9.37 ms\nWall time: 9.36 ms\n"
]
],
[
[
"Roughly 10 milliseconds on my computer (this may vary). How many minutes would it be to run `game_score` for all 55,902 valid honeycombs?",
"_____no_output_____"
]
],
[
[
"55902 * 10/1000 / 60",
"_____no_output_____"
]
],
[
[
"About 9 or 10 minutes. I could run `best_honeycomb(enable1)` right now and take a coffee break until it completes, but I think that a puzzle like this deserves a more elegant solution. I'd like to get the run time under a minute (as is suggested in [Project Euler](https://projecteuler.net/)), and I have an idea how to do it.\n\n# Step 5: Faster Algorithm: Points Table\n\nHere's my plan for a more efficient program:\n\n1. Keep the same strategy of trying every pangram letterset, but do some precomputation that will make `game_score` much faster.\n1. The precomputation is: compute the `letterset` and `word_score` for each word, and make a table of `{letterset: total_points}` giving the total number of word score points for all the words that correspond to each letterset. I call this a **points table**.\n3. These calculations are independent of the honeycomb, so they need to be done only once, not 55,902 times. \n4. `game_score2` (the name is changed because the interface has changed) takes a honeycomb and a points table as input. The idea is that every word that the honeycomb can make must have a letterset that is the same as a valid **letter subset** of the honeycomb. A valid letter subset must include the center letter, and it may or may not include each of the other 6 letters, so there are exactly $2^6 = 64$ valid letter subsets. (The function `letter_subsets(honeycomb)` computes these.)\nThe result of `game_score2` is the sum of the honeycomb's 64 letter subset entries in the points table.\n\n\nThat means that in `game_score2` we no longer need to iterate over 44,585 words and check if each word is a subset of the honeycomb. Instead we iterate over the 64 subsets of the honeycomb and for each one check—in one table lookup—whether it is a word (or more than word) and how many total points those word(s) score. Since 64 < 44,585, that's a nice optimization!\n\n\nHere's the code:",
"_____no_output_____"
]
],
[
[
"PointsTable = Dict[Letterset, int]\n\ndef best_honeycomb(words) -> Honeycomb: \n \"\"\"Return a honeycomb with highest game score on these words.\"\"\"\n points_table = tabulate_points(words)\n honeycombs = (Honeycomb(letters, center) \n for letters in points_table if len(letters) == 7 \n for center in letters)\n return max(honeycombs, key=lambda h: game_score2(h, points_table))\n\ndef tabulate_points(words) -> PointsTable:\n \"\"\"Return a Counter of {letterset: points} from words.\"\"\"\n table = Counter()\n for w in words:\n table[letterset(w)] += word_score(w)\n return table\n\ndef letter_subsets(honeycomb) -> List[Letterset]:\n \"\"\"The 64 subsets of the letters in the honeycomb, always including the center letter.\"\"\"\n return [letters \n for n in range(1, 8) \n for letters in map(''.join, combinations(honeycomb.letters, n))\n if honeycomb.center in letters]\n\ndef game_score2(honeycomb, points_table) -> int:\n \"\"\"The total score for this honeycomb, using a points table.\"\"\"\n return sum(points_table[letterset] for letterset in letter_subsets(honeycomb))",
"_____no_output_____"
]
],
[
[
"Let's get a feel for how this works. \n\nFirst `letter_subsets` (a 4-letter honeycomb makes $2^3 = 8$ subsets; 7-letter honeycombs make $2^6 = 64$):",
"_____no_output_____"
]
],
[
[
"letter_subsets(Honeycomb('GLAM', 'G')) ",
"_____no_output_____"
],
[
"mini # Remind me again what the mini word list is?",
"_____no_output_____"
]
],
[
[
"Now `tabulate_points`:",
"_____no_output_____"
]
],
[
[
"tabulate_points(mini)",
"_____no_output_____"
]
],
[
[
"The letterset `'AGLM'` gets 8 points, 7 for AMALGAM and 1 for GLAM. `'ACEIORT'` gets 31 points, 17 for CACCIATORE and 14 for EROTICA. The other lettersets represent one word each. \n\nLet's make sure we haven't broken the `best_honeycomb` function:",
"_____no_output_____"
]
],
[
[
"assert best_honeycomb(mini) == Honeycomb('ACEIORT', 'A')",
"_____no_output_____"
]
],
[
[
"# Step 6: The Solution",
"_____no_output_____"
],
[
"Finally, the solution to the puzzle:",
"_____no_output_____"
]
],
[
[
"%time best = best_honeycomb(enable1)",
"CPU times: user 1.71 s, sys: 1.2 ms, total: 1.71 s\nWall time: 1.71 s\n"
],
[
"best, game_score(best, enable1)",
"_____no_output_____"
]
],
[
[
"**Wow! 3898 is a high score!** \n\nAnd it took less than 2 seconds of computation to find the best honeycomb!",
"_____no_output_____"
],
[
"# Step 7: Even Faster Algorithm: Branch and Bound\n\nA run time of 2 seconds is pretty good! But what if the word list were 100 times bigger? What if a honeycomb had 12 letters around the outside, not just 6? We might still be looking for ideas to speed up the computation. I happen to have one.\n\nConsider the word 'EQUIVOKE'. It is a pangram, but what with the 'Q' and 'V' and 'K', it is not a high-scoring honeycomb, regardless of what center is used:",
"_____no_output_____"
]
],
[
[
"{C: game_score(Honeycomb('EIKOQUV', C), enable1)\n for C in 'EQUIVOKE'}",
"_____no_output_____"
]
],
[
[
"It would be great if we could eliminate all seven of these honeycombs at once, rather than trying each one in turn. So my idea is to:\n- Keep track of the best honeycomb and best score found so far.\n- For each new pangram letterset, ask \"if we weren't required to use the center letter, would this letterset score higher than the best honeycomb so far?\" \n- If yes, then try it with all seven centers; if not then discard it immediately.\n- This is called a [**branch and bound**](https://en.wikipedia.org/wiki/Branch_and_bound) algorithm: if an **upper bound** of the new letterset's score can't beat the best honeycomb so far, then we prune a whole **branch** of the search tree consisting of the seven honeycombs that have that letterset.\n\nWhat would the score of a letterset be if we weren't required to use the center letter? It turns out I can make a dummy Honeycomb and specify the empty string for the center, `Honeycomb(letters, '')`, and call `game_score2` on that. This works because of a quirk of Python: we ask if `honeycomb.center in letters`; normally in Python the expression `x in y` means \"is `x` a member of the collection `y`\", but when `y` is a string it means \"is `x` a substring of `y`\", and the empty string is a substring of every string. (If I had represented a letterset as a Python `set`, this wouldn't work.)\n\nThus, I can rewrite `best_honeycomb` as follows:",
"_____no_output_____"
]
],
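[
[
"First, a quick check of the quirk itself (my addition, not part of the original solution):",
"_____no_output_____"
]
],
[
[
"# The empty string is a substring of every string, so a dummy center of ''\n# passes the `honeycomb.center in letters` test for every letter subset.\nassert '' in 'AEGINRT'\nassert all('' in s for s in ['A', 'AE', 'AEGINRT'])",
"_____no_output_____"
]
],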
[
[
"def best_honeycomb2(words) -> Honeycomb: \n \"\"\"Return a honeycomb with highest game score on these words.\"\"\"\n points_table = tabulate_points(words)\n best, best_score = None, 0\n pangrams = (s for s in points_table if len(s) == 7)\n for p in pangrams:\n if game_score2(Honeycomb(p, ''), points_table) > best_score:\n for center in p:\n honeycomb = Honeycomb(p, center)\n score = game_score2(honeycomb, points_table)\n if score > best_score:\n best, best_score = honeycomb, score\n return best",
"_____no_output_____"
],
[
"%time best_honeycomb2(enable1)",
"CPU times: user 393 ms, sys: 1.31 ms, total: 395 ms\nWall time: 394 ms\n"
]
],
[
[
"Same honeycomb for the answer, but four times faster—less than half a second.\n\n# Step 8: Curiosity\n\nI'm curious about a bunch of things.\n\n### What's the highest-scoring individual word?",
"_____no_output_____"
]
],
[
[
"max(enable1, key=word_score)",
"_____no_output_____"
]
],
[
[
"### What are some of the pangrams?",
"_____no_output_____"
]
],
[
[
"pangrams[::500] # Every five-hundreth pangram",
"_____no_output_____"
]
],
[
[
"### What's the breakdown of reasons why words are invalid?",
"_____no_output_____"
]
],
[
[
"Counter('has S' if 'S' in w else \n '< 4' if len(w) < 4 else \n '> 7' if len(set(w)) > 7 else \n 'valid'\n for w in valid_words(open('enable1.txt').read(), lambda w: True))",
"_____no_output_____"
]
],
[
[
"There are more than twice as many words with an 'S' as there are valid words.\n\n### About the points table: How many different letter subsets are there? ",
"_____no_output_____"
]
],
[
[
"pts = tabulate_points(enable1)\nlen(pts)",
"_____no_output_____"
]
],
[
[
"That means there's about two valid words for each letterset.\n\n### Which letter subsets score the most?",
"_____no_output_____"
]
],
[
[
"pts.most_common(10)",
"_____no_output_____"
]
],
[
[
"The best honeycomb, `'AEGINRT`, is also the highest scoring letter subset on its own (although it only gets 832 of the 3,898 total points from using all seven letters).",
"_____no_output_____"
],
[
"### How many honeycombs does `best_honeycomb2` consider?\n\nWe know that `best_honeycomb` considers 7,986 × 7 = 55,902 honeycombs. How many does `best_honeycomb2` consider? We can answer that by wrapping `Honeycomb` with a decorator that counts calls:",
"_____no_output_____"
]
],
[
[
"def call_counter(fn):\n \"Return a function that calls fn, and increments a counter on each call.\"\n def wrapped(*args, **kwds):\n wrapped.call_counter += 1\n return fn(*args, **kwds)\n wrapped.call_counter = 0\n return wrapped\n \nHoneycomb = call_counter(Honeycomb)\n\nbest = best_honeycomb2(enable1)\nHoneycomb.call_counter",
"_____no_output_____"
]
],
[
[
"Only 8,084 honeycombs are considered. That means that most pangrams are only considered once; for only 14 pangrams do we consider all seven centers.",
"_____no_output_____"
]
],
[
[
"(8084 - 7986) / 7",
"_____no_output_____"
]
],
[
[
"# Step 9: Fancy Report\n\nI'd like to see the actual words that each honeycomb can make, in addition to the total score, and I'm curious about how the words are divided up by letterset. Here's a function to provide such a report. I remembered that there is a `fill` function in Python (it is in the `textwrap` module) but this turned out to be a lot more complicated than I expected. I guess it is difficult to create a practical extraction and reporting tool. I feel you, [Larry Wall](http://www.wall.org/~larry/).",
"_____no_output_____"
]
],
[
[
"from textwrap import fill\n\ndef report(honeycomb=None, words=enable1):\n \"\"\"Print stats, words, and word scores for the given honeycomb (or the best\n honeycomb if no honeycomb is given) over the given word list.\"\"\"\n bins = group_by(words, letterset)\n adj = (\"best \" if honeycomb is None else \"\")\n honeycomb = honeycomb or best_honeycomb(words)\n points = game_score(honeycomb, words)\n subsets = letter_subsets(honeycomb)\n nwords = sum(len(bins[s]) for s in subsets)\n print(f'The {adj}{honeycomb} scores {Ns(points, \"point\")} on {Ns(nwords, \"word\")}',\n f'from a {len(words)} word list:\\n')\n for s in sorted(subsets, key=lambda s: (-len(s), s)):\n if bins[s]:\n pts = sum(word_score(w) for w in bins[s])\n wcount = Ns(len(bins[s]), \"pangram\" if len(s) == 7 else \"word\")\n intro = f'{s:>7} {Ns(pts, \"point\"):>10} {wcount:>8} '\n words = [f'{w}:{word_score(w)}' for w in sorted(bins[s])]\n print(fill(' '.join(words), width=110, \n initial_indent=intro, subsequent_indent=' '*8))\n \ndef Ns(n, noun):\n \"\"\"A string with `n` followed by the plural or singular of noun:\n Ns(3, 'bear') => '3 bears'; Ns(1, 'world') => '1 world'\"\"\" \n return f\"{n:d} {noun}{' ' if n == 1 else 's'}\"\n\ndef group_by(items, key):\n \"Group items into bins of a dict, each bin keyed by key(item).\"\n bins = defaultdict(list)\n for item in items:\n bins[key(item)].append(item)\n return bins",
"_____no_output_____"
],
[
"report(hc, mini)",
"The Honeycomb('AEGLMPX', 'G') scores 24 points on 4 words from a 6 word list:\n\nAEGLMPX 15 points 1 pangram MEGAPLEX:15\n AEGM 1 point 1 word GAME:1\n AGLM 8 points 2 words AMALGAM:7 GLAM:1\n"
],
[
"report()",
"The best Honeycomb('AEGINRT', 'R') scores 3898 points on 537 words from a 44585 word list:\n\nAEGINRT 832 points 50 pangrams AERATING:15 AGGREGATING:18 ARGENTINE:16 ARGENTITE:16 ENTERTAINING:19\n ENTRAINING:17 ENTREATING:17 GARNIERITE:17 GARTERING:16 GENERATING:17 GNATTIER:15 GRANITE:14 GRATINE:14\n GRATINEE:15 GRATINEEING:18 GREATENING:17 INGRATE:14 INGRATIATE:17 INTEGRATE:16 INTEGRATING:18\n INTENERATING:19 INTERAGE:15 INTERGANG:16 INTERREGNA:17 INTREATING:17 ITERATING:16 ITINERATING:18\n NATTERING:16 RATTENING:16 REAGGREGATING:20 REATTAINING:18 REGENERATING:19 REGRANTING:17 REGRATING:16\n REINITIATING:19 REINTEGRATE:18 REINTEGRATING:20 REITERATING:18 RETAGGING:16 RETAINING:16\n RETARGETING:18 RETEARING:16 RETRAINING:17 RETREATING:17 TANGERINE:16 TANGIER:14 TARGETING:16\n TATTERING:16 TEARING:14 TREATING:15\n AEGINR 270 points 35 words AGINNER:7 AGREEING:8 ANEARING:8 ANERGIA:7 ANGERING:8 ANGRIER:7 ARGININE:8 EARING:6\n EARNING:7 EARRING:7 ENGRAIN:7 ENGRAINING:10 ENRAGING:8 GAINER:6 GANGRENING:10 GARNERING:9 GEARING:7\n GRAINER:7 GRAINIER:8 GRANNIE:7 GREGARINE:9 NAGGIER:7 NEARING:7 RANGIER:7 REAGIN:6 REARING:7\n REARRANGING:11 REEARNING:9 REENGAGING:10 REGAIN:6 REGAINER:8 REGAINING:9 REGEARING:9 REGINA:6\n REGINAE:7\n AEGIRT 34 points 5 words AIGRET:6 AIGRETTE:8 GAITER:6 IRRIGATE:8 TRIAGE:6\n AEGNRT 94 points 13 words ARGENT:6 GARNET:6 GENERATE:8 GRANTEE:7 GRANTER:7 GREATEN:7 NEGATER:7 REAGENT:7\n REGENERATE:10 REGNANT:7 REGRANT:7 TANAGER:7 TEENAGER:8\n AEINRT 232 points 30 words ARENITE:7 ATTAINER:8 ENTERTAIN:9 ENTERTAINER:11 ENTRAIN:7 ENTRAINER:9 INERRANT:8\n INERTIA:7 INERTIAE:8 INTENERATE:10 INTREAT:7 ITERANT:7 ITINERANT:9 ITINERATE:9 NATTIER:7 NITRATE:7\n RATINE:6 REATTAIN:8 REINITIATE:10 RETAIN:6 RETAINER:8 RETINA:6 RETINAE:7 RETIRANT:8 RETRAIN:7\n TERRAIN:7 TERTIAN:7 TRAINEE:7 TRAINER:7 TRIENNIA:8\n AGINRT 167 points 21 words AIRTING:7 ATTIRING:8 GRANITA:7 GRANTING:8 GRATIN:6 GRATING:7 INGRATIATING:12\n INTRIGANT:9 IRRIGATING:10 IRRITATING:10 NARRATING:9 NITRATING:9 RANTING:7 RATING:6 RATTING:7 TARING:6\n TARRING:7 TARTING:7 TITRATING:9 TRAINING:8 TRIAGING:8\n EGINRT 218 points 26 words ENGIRT:6 ENTERING:8 GETTERING:9 GITTERN:7 GREETING:8 IGNITER:7 INTEGER:7\n INTERNING:9 INTERRING:9 REENTERING:10 REGREETING:10 REGRETTING:10 REIGNITE:8 REIGNITING:10\n REINTERRING:11 RENTING:7 RETINTING:9 RETIRING:8 RETTING:7 RINGENT:7 TEETERING:9 TENTERING:9 TIERING:7\n TITTERING:9 TREEING:7 TRIGGERING:10\n AEGNR 120 points 18 words ANGER:5 ARRANGE:7 ARRANGER:8 ENGAGER:7 ENRAGE:6 GANGER:6 GANGRENE:8 GARNER:6\n GENERA:6 GRANGE:6 GRANGER:7 GREENGAGE:9 NAGGER:6 RANGE:5 RANGER:6 REARRANGE:9 REENGAGE:8 REGNA:5\n AEGRT 123 points 19 words AGGREGATE:9 ERGATE:6 ETAGERE:7 GARGET:6 GARRET:6 GARTER:6 GRATE:5 GRATER:6 GREAT:5\n GREATER:7 REAGGREGATE:11 REGATTA:7 REGRATE:7 RETAG:5 RETARGET:8 TAGGER:6 TARGE:5 TARGET:6 TERGA:5\n AEINR 19 points 3 words INANER:6 NARINE:6 RAINIER:7\n AEIRT 135 points 20 words ARIETTA:7 ARIETTE:7 ARTIER:6 ATTIRE:6 ATTRITE:7 IRATE:5 IRATER:6 IRRITATE:8\n ITERATE:7 RATITE:6 RATTIER:7 REITERATE:9 RETIA:5 RETIARII:8 TARRIER:7 TATTIER:7 TEARIER:7 TERAI:5\n TERRARIA:8 TITRATE:7\n AENRT 132 points 19 words ANTEATER:8 ANTRE:5 ENTERA:6 ENTRANT:7 ENTREAT:7 ERRANT:6 NARRATE:7 NARRATER:8\n NATTER:6 NEATER:6 RANTER:6 RATTEEN:7 RATTEN:6 RATTENER:8 REENTRANT:9 RETREATANT:10 TANNER:6 TERNATE:7\n TERRANE:7\n AGINR 138 points 19 words AGRARIAN:8 AIRING:6 ANGARIA:7 ARRAIGN:7 ARRAIGNING:10 ARRANGING:9 GARAGING:8\n GARNI:5 GARRING:7 GNARRING:8 GRAIN:5 GRAINING:8 INGRAIN:7 INGRAINING:10 RAGGING:7 
RAGING:6 RAINING:7\n RANGING:7 RARING:6\n AGIRT 5 points 1 word TRAGI:5\n AGNRT 5 points 1 word GRANT:5\n AINRT 64 points 9 words ANTIAIR:7 ANTIAR:6 ANTIARIN:8 INTRANT:7 IRRITANT:8 RIANT:5 TITRANT:7 TRAIN:5\n TRINITARIAN:11\n EGINR 186 points 24 words ENGINEER:8 ENGINEERING:11 ERRING:6 GINGER:6 GINGERING:9 GINNER:6 GINNIER:7\n GREEING:7 GREENIE:7 GREENIER:8 GREENING:8 GRINNER:7 NIGGER:6 REENGINEER:10 REENGINEERING:13\n REGREENING:10 REIGN:5 REIGNING:8 REINING:7 RENEGING:8 RENIG:5 RENIGGING:9 RERIGGING:9 RINGER:6\n EGIRT 27 points 4 words GRITTIER:8 TERGITE:7 TIGER:5 TRIGGER:7\n EGNRT 12 points 2 words GERENT:6 REGENT:6\n EINRT 190 points 29 words ENTIRE:6 INERT:5 INTER:5 INTERN:6 INTERNE:7 INTERNEE:8 INTERTIE:8 NETTIER:7\n NITER:5 NITERIE:7 NITRE:5 NITRITE:7 NITTIER:7 REINTER:7 RENITENT:8 RENTIER:7 RETINE:6 RETINENE:8\n RETINITE:8 RETINT:6 TEENIER:7 TENTIER:7 TERRINE:7 TINIER:6 TINNER:6 TINNIER:7 TINTER:6 TRIENE:6\n TRINE:5\n GINRT 43 points 6 words GIRTING:7 GRITTING:8 RINGGIT:7 TIRING:6 TRIGGING:8 TRINING:7\n AEGR 84 points 17 words AGER:1 AGGER:5 AGREE:5 ARREARAGE:9 EAGER:5 EAGERER:7 EAGRE:5 EGGAR:5 GAGER:5\n GAGGER:6 GARAGE:6 GEAR:1 RAGE:1 RAGEE:5 RAGGEE:6 REGEAR:6 REGGAE:6\n AEIR 22 points 4 words AERIE:5 AERIER:6 AIRER:5 AIRIER:6\n AENR 40 points 9 words ANEAR:5 ARENA:5 EARN:1 EARNER:6 NEAR:1 NEARER:6 RANEE:5 REEARN:6 RERAN:5\n AERT 127 points 24 words AERATE:6 ARETE:5 EATER:5 ERRATA:6 RATE:1 RATER:5 RATTER:6 REATA:5 RETEAR:6\n RETREAT:7 RETREATER:9 TARE:1 TARRE:5 TARTER:6 TARTRATE:8 TATER:5 TATTER:6 TEAR:1 TEARER:6 TERRA:5\n TERRAE:6 TETRA:5 TREAT:5 TREATER:7\n AGIR 6 points 2 words AGRIA:5 RAGI:1\n AGNR 13 points 5 words GNAR:1 GNARR:5 GRAN:1 GRANA:5 RANG:1\n AGRT 13 points 3 words GRAT:1 RAGTAG:6 TAGRAG:6\n AINR 8 points 4 words AIRN:1 NAIRA:5 RAIN:1 RANI:1\n AIRT 21 points 5 words AIRT:1 ATRIA:5 RIATA:5 TIARA:5 TRAIT:5\n ANRT 50 points 10 words ANTRA:5 ARRANT:6 RANT:1 RATAN:5 RATTAN:6 TANTARA:7 TANTRA:6 TARN:1 TARTAN:6\n TARTANA:7\n EGIR 17 points 3 words GREIGE:6 RERIG:5 RIGGER:6\n EGNR 37 points 6 words GENRE:5 GREEN:5 GREENER:7 REGREEN:7 RENEGE:6 RENEGER:7\n EGRT 45 points 7 words EGRET:5 GETTER:6 GREET:5 GREETER:7 REGREET:7 REGRET:6 REGRETTER:9\n EINR 17 points 4 words INNER:5 REIN:1 RENIN:5 RENNIN:6\n EIRT 87 points 17 words RETIE:5 RETIRE:6 RETIREE:7 RETIRER:7 RITE:1 RITTER:6 TERRIER:7 TERRIT:6 TIER:1\n TIRE:1 TITER:5 TITRE:5 TITTER:6 TITTERER:8 TRIER:5 TRITE:5 TRITER:6\n ENRT 104 points 19 words ENTER:5 ENTERER:7 ENTREE:6 ETERNE:6 NETTER:6 REENTER:7 RENNET:6 RENT:1 RENTE:5\n RENTER:6 RETENE:6 TEENER:6 TENNER:6 TENTER:6 TERN:1 TERNE:5 TERREEN:7 TERRENE:7 TREEN:5\n GINR 44 points 9 words GIRN:1 GIRNING:7 GRIN:1 GRINNING:8 IRING:5 RIGGING:7 RING:1 RINGING:7 RINNING:7\n GIRT 3 points 3 words GIRT:1 GRIT:1 TRIG:1\n AER 25 points 7 words AREA:1 AREAE:5 ARREAR:6 RARE:1 RARER:5 REAR:1 REARER:6\n AGR 2 points 2 words AGAR:1 RAGA:1\n AIR 2 points 2 words ARIA:1 RAIA:1\n ART 24 points 5 words ATTAR:5 RATATAT:7 TART:1 TARTAR:6 TATAR:5\n EGR 15 points 4 words EGER:1 EGGER:5 GREE:1 GREEGREE:8\n EIR 11 points 2 words EERIE:5 EERIER:6\n ENR 1 point 1 word ERNE:1\n ERT 27 points 7 words RETE:1 TEETER:6 TERETE:6 TERRET:6 TETTER:6 TREE:1 TRET:1\n GIR 7 points 2 words GRIG:1 GRIGRI:6\n"
]
],
[
[
"# Step 10: What honeycombs have a high score without a lot of words?\n\nMichael Braverman said he dislikes puzzles with a lot of low-scoring four-letter words. Can we find succint puzzles with lots of points but few words? With two objectives there won't be a single best answer to this question; rather we can ask: what honeycombs are there such that there are no other honeycombs with both more points and fewer words? We say such honeycombs are [**Pareto optimal**](https://en.wikipedia.org/wiki/Pareto_efficiency) and are on the **Pareto frontier**. We can find them as follows:",
"_____no_output_____"
]
],
[
[
"def pareto_honeycombs(words) -> list: \n \"\"\"A table of {word_count: (points, honeycomb)} with highest scoring honeycomb.\"\"\"\n points_table = tabulate_points(words)\n wcount_table = Counter(map(letterset, words))\n honeycombs = (Honeycomb(letters, center) \n for letters in points_table if len(letters) == 7 \n for center in letters)\n # Build a table of {word_count: (points, honeycomb)}\n table = defaultdict(lambda: (0, None)) \n for h in honeycombs:\n points = game_score2(h, points_table)\n wcount = game_score2(h, wcount_table)\n table[wcount] = max(table[wcount], (points, h))\n return pareto_frontier(table)\n \ndef pareto_frontier(table) -> list:\n \"\"\"The pareto frontier that minimizes word counts while maximizing points.\n Returns a list of (wcount, points, honeycomb, points/wcount) entries\n such that there is no other entry that has fewer words and more points.\"\"\"\n return [(w, p, h, round(p/w, 2))\n for w, (p, h) in sorted(table.items())\n if not any(h2 != h and w2 <= w and p2 >= p\n for w2, (p2, h2) in table.items())]",
"_____no_output_____"
],
[
"ph = pareto_honeycombs(enable1)\nlen(ph)",
"_____no_output_____"
]
],
[
[
"So there are 108 (out of 55,902) honeycombs on the Pareto frontier. We can see the first ten (sorted by word count), and every tenth one after that:",
"_____no_output_____"
]
],
[
[
"ph[:10] # (word count, points, honeycomb, points/wcount) ",
"_____no_output_____"
],
[
"ph[10::10]",
"_____no_output_____"
]
],
[
[
"Let's see what the frontier looks like by plotting word counts versus points scored:",
"_____no_output_____"
]
],
[
[
"W, P, H, PPW = zip(*ph)\n\ndef plot(xlabel, X, ylabel, Y): \n plt.plot(X, Y, '.'); plt.xlabel(xlabel); plt.ylabel(ylabel); plt.grid(True)\n \nplot('Word count', W, 'Points', P, )",
"_____no_output_____"
]
],
[
[
"That's somewhat surprising; usually a Pareto frontier looks like a quarter-circle; here it looks like an almost straight line. Maybe we can get a better view by plotting word counts versus the number of points per word:",
"_____no_output_____"
]
],
[
[
"plot('Word count', W, 'Points per word', PPW)",
"_____no_output_____"
]
],
[
[
"We can see all the Pareto optimal honeycombs that score more than, say, 7.6 points per word:",
"_____no_output_____"
]
],
[
[
"[entry for entry in ph if entry[-1] > 7.6]",
"_____no_output_____"
]
],
[
[
"The last few honeycombs on the right-hand side all rise above the average points/word. We can see that they are all variants of the highest-scoring honeycomb, but with different centers:",
"_____no_output_____"
]
],
[
[
"ph[-5:]",
"_____no_output_____"
]
],
[
[
"Here are reports on what I think are the most interesting low-word-count, higher-score honeycombs. I would have scored zero on the first one, and probably not much better on the second.",
"_____no_output_____"
]
],
[
[
"report(Honeycomb('CEGIPTX', 'G'))",
"The Honeycomb('CEGIPTX', 'G') scores 45 points on 5 words from a 44585 word list:\n\nCEGIPTX 17 points 1 pangram EPEXEGETIC:17\n CEGITX 8 points 1 word EXEGETIC:8\n CEGIP 7 points 1 word EPIGEIC:7\n EGIP 6 points 1 word PIGGIE:6\n EGTX 7 points 1 word EXEGETE:7\n"
],
[
"report(Honeycomb('DEIORXZ', 'X'))",
"The Honeycomb('DEIORXZ', 'X') scores 157 points on 19 words from a 44585 word list:\n\nDEIORXZ 65 points 4 pangrams DEOXIDIZER:17 OXIDIZER:15 REOXIDIZE:16 REOXIDIZED:17\n DEIOXZ 34 points 4 words DEOXIDIZE:9 DEOXIDIZED:10 OXIDIZE:7 OXIDIZED:8\n DEIOX 23 points 4 words DIOXIDE:7 DOXIE:5 EXODOI:6 OXIDE:5\n DEORX 12 points 2 words REDOX:5 XEROXED:7\n DEIX 5 points 1 word DEXIE:5\n DIOX 13 points 3 words DIOXID:6 IXODID:6 OXID:1\n EORX 5 points 1 word XEROX:5\n"
]
],
[
[
"The following I think are decent puzzles:",
"_____no_output_____"
]
],
[
[
"report(Honeycomb('ACINOTV', 'V'))",
"The Honeycomb('ACINOTV', 'V') scores 374 points on 45 words from a 44585 word list:\n\nACINOTV 171 points 10 pangrams ACTIVATION:17 AVOCATION:16 CAVITATION:17 CONVOCATION:18 INACTIVATION:19\n INVOCATION:17 VACATION:15 VACCINATION:18 VATICINATION:19 VOCATION:15\n ACINOV 7 points 1 word AVIONIC:7\n ACINTV 8 points 1 word CAVATINA:8\n AINOTV 62 points 7 words AVIATION:8 INNOVATION:10 INVITATION:10 NOVATION:8 OVATION:7 TITIVATION:10\n VITIATION:9\n CINOTV 17 points 2 words CONVICT:7 CONVICTION:10\n ACINV 20 points 3 words VACCINA:7 VACCINIA:8 VINCA:5\n ACITV 24 points 4 words ATAVIC:6 VATIC:5 VIATIC:6 VIATICA:7\n ACNTV 6 points 1 word VACANT:6\n ACOTV 6 points 1 word OCTAVO:6\n AINOV 5 points 1 word AVION:5\n CINOV 11 points 2 words COVIN:5 OVONIC:6\n AINV 7 points 3 words AVIAN:5 VAIN:1 VINA:1\n AITV 6 points 2 words VITA:1 VITTA:5\n ANOV 1 point 1 word NOVA:1\n ANTV 5 points 1 word AVANT:5\n AOTV 6 points 1 word OTTAVA:6\n CINV 5 points 1 word VINIC:5\n INOV 1 point 1 word VINO:1\n AIV 1 point 1 word VIVA:1\n CIV 5 points 1 word CIVIC:5\n"
],
[
"report(Honeycomb('ACINOTU', 'U'))",
"The Honeycomb('ACINOTU', 'U') scores 385 points on 55 words from a 44585 word list:\n\nACINOTU 162 points 10 pangrams ACTUATION:16 ANNUNCIATION:19 AUCTION:14 CAUTION:14 CONTINUA:15 CONTINUANT:17\n CONTINUATION:19 COUNTIAN:15 CUNCTATION:17 INCAUTION:16\n ACINTU 6 points 1 word TUNICA:6\n ACNOTU 31 points 4 words ACCOUNT:7 ACCOUNTANT:10 COCOANUT:8 TOUCAN:6\n AINOTU 17 points 2 words ANTIUNION:9 NUTATION:8\n CINOTU 24 points 3 words CONTINUO:8 INUNCTION:9 UNCTION:7\n ACINU 5 points 1 word UNCIA:5\n ACOTU 6 points 1 word OUTACT:6\n AINTU 9 points 1 word ANNUITANT:9\n CINOU 13 points 2 words INCONNU:7 NUNCIO:6\n CINTU 10 points 2 words CUTIN:5 TUNIC:5\n CNOTU 20 points 3 words COCONUT:7 COUNT:5 OUTCOUNT:8\n INOTU 16 points 2 words INTUITION:9 TUITION:7\n AINU 1 point 1 word UNAI:1\n ANTU 13 points 4 words AUNT:1 NUTANT:6 TAUNT:5 TUNA:1\n AOTU 1 point 1 word AUTO:1\n CINU 7 points 2 words UNCI:1 UNCINI:6\n CNOU 1 point 1 word UNCO:1\n CNTU 6 points 2 words CUNT:1 UNCUT:5\n COTU 6 points 1 word CUTOUT:6\n INOU 13 points 2 words NONUNION:8 UNION:5\n INTU 7 points 2 words INTUIT:6 UNIT:1\n NOTU 1 point 1 word UNTO:1\n ANU 1 point 1 word UNAU:1\n ATU 1 point 1 word TAUT:1\n ITU 5 points 1 word TUTTI:5\n NOU 1 point 1 word NOUN:1\n OTU 1 point 1 word TOUT:1\n TU 1 point 1 word TUTU:1\n"
]
],
[
[
"# Step 11: S Words\n\nWhat if we allowed honeycombs and words to have an 'S' in them?",
"_____no_output_____"
]
],
[
[
"enable1s = valid_words(open('enable1.txt').read(), \n lambda w: len(w) >= 4 and len(set(w)) <= 7)\n\nlen(enable1s), len(enable1)",
"_____no_output_____"
]
],
[
[
"Allowing 'S' more than doubles the number of words. Will it double the score of the best honeycomb?",
"_____no_output_____"
]
],
[
[
"report(words=enable1s)",
"The best Honeycomb('AEINRST', 'E') scores 8681 points on 1179 words from a 98141 word list:\n\nAEINRST 1381 points 86 pangrams ANESTRI:14 ANTISERA:15 ANTISTRESS:17 ANTSIER:14 ARENITES:15 ARSENITE:15\n ARSENITES:16 ARTINESS:15 ARTINESSES:17 ATTAINERS:16 ENTERTAINERS:19 ENTERTAINS:17 ENTRAINERS:17\n ENTRAINS:15 ENTREATIES:17 ERRANTRIES:17 INERTIAS:15 INSTANTER:16 INTENERATES:18 INTERSTATE:17\n INTERSTATES:18 INTERSTRAIN:18 INTERSTRAINS:19 INTRASTATE:17 INTREATS:15 IRATENESS:16 IRATENESSES:18\n ITINERANTS:17 ITINERARIES:18 ITINERATES:17 NASTIER:14 NITRATES:15 RAINIEST:15 RATANIES:15 RATINES:14\n REATTAINS:16 REINITIATES:18 REINSTATE:16 REINSTATES:17 RESINATE:15 RESINATES:16 RESISTANT:16\n RESISTANTS:17 RESTRAIN:15 RESTRAINER:17 RESTRAINERS:18 RESTRAINS:16 RESTRAINT:16 RESTRAINTS:17\n RETAINERS:16 RETAINS:14 RETINAS:14 RETIRANTS:16 RETRAINS:15 RETSINA:14 RETSINAS:15 SANITARIES:17\n SEATRAIN:15 SEATRAINS:16 STAINER:14 STAINERS:15 STANNARIES:17 STEARIN:14 STEARINE:15 STEARINES:16\n STEARINS:15 STRAINER:15 STRAINERS:16 STRAITEN:15 STRAITENS:16 STRAITNESS:17 STRAITNESSES:19\n TANISTRIES:17 TANNERIES:16 TEARSTAIN:16 TEARSTAINS:17 TENANTRIES:17 TERNARIES:16 TERRAINS:15\n TERTIANS:15 TRAINEES:15 TRAINERS:15 TRANSIENT:16 TRANSIENTS:17 TRISTEARIN:17 TRISTEARINS:18\n AEINRS 124 points 16 words AIRINESS:8 AIRINESSES:10 ANSERINE:8 ANSERINES:9 ARISEN:6 ARSINE:6 ARSINES:7\n INSANER:7 INSNARE:7 INSNARER:8 INSNARERS:9 INSNARES:8 SENARII:7 SIERRAN:7 SIRENIAN:8 SIRENIANS:9\n AEINRT 232 points 30 words ARENITE:7 ATTAINER:8 ENTERTAIN:9 ENTERTAINER:11 ENTRAIN:7 ENTRAINER:9 INERRANT:8\n INERTIA:7 INERTIAE:8 INTENERATE:10 INTREAT:7 ITERANT:7 ITINERANT:9 ITINERATE:9 NATTIER:7 NITRATE:7\n RATINE:6 REATTAIN:8 REINITIATE:10 RETAIN:6 RETAINER:8 RETINA:6 RETINAE:7 RETIRANT:8 RETRAIN:7\n TERRAIN:7 TERTIAN:7 TRAINEE:7 TRAINER:7 TRIENNIA:8\n AEINST 713 points 80 words ANISETTE:8 ANISETTES:9 ANTISENSE:9 ANTISTATE:9 ANTSIEST:8 ASININITIES:11\n ASSASSINATE:11 ASSASSINATES:12 ASTATINE:8 ASTATINES:9 ENTASIA:7 ENTASIAS:8 ENTASIS:7 ETESIAN:7\n ETESIANS:8 INANEST:7 INANITIES:9 INITIATES:9 INNATENESS:10 INNATENESSES:12 INSANEST:8 INSANITIES:10\n INSATIATE:9 INSATIATENESS:13 INSATIATENESSES:15 INSENSATE:9 INSTANTANEITIES:15 INSTANTIATE:11\n INSTANTIATES:12 INSTANTNESS:11 INSTANTNESSES:13 INSTATE:7 INSTATES:8 INTESTATE:9 INTESTATES:10\n ISATINE:7 ISATINES:8 NASTIES:7 NASTIEST:8 NASTINESS:9 NASTINESSES:11 NATTIEST:8 NATTINESS:9\n NATTINESSES:11 SANITATE:8 SANITATES:9 SANITIES:8 SANITISE:8 SANITISES:9 SATINET:7 SATINETS:8\n SENTENTIA:9 SENTENTIAE:10 SESTINA:7 SESTINAS:8 STANINE:7 STANINES:8 STANNITE:8 STANNITES:9 TAENIAS:7\n TAENIASES:9 TAENIASIS:9 TANSIES:7 TASTINESS:9 TASTINESSES:11 TATTINESS:9 TATTINESSES:11 TENIAS:6\n TENIASES:8 TENIASIS:8 TETANIES:8 TETANISE:8 TETANISES:9 TINEAS:6 TISANE:6 TISANES:7 TITANATES:9\n TITANESS:8 TITANESSES:10 TITANITES:9\n AEIRST 473 points 60 words AERIEST:7 AIREST:6 AIRIEST:7 ARIETTAS:8 ARIETTES:8 ARISTAE:7 ARISTATE:8 ARTERIES:8\n ARTERITIS:9 ARTIEST:7 ARTISTE:7 ARTISTES:8 ARTISTRIES:10 ARTSIER:7 ARTSIEST:8 ASSISTER:8 ASSISTERS:9\n ASTERIA:7 ASTERIAS:8 ATRESIA:7 ATRESIAS:8 ATTIRES:7 EATERIES:8 IRATEST:7 IRRITATES:9 ITERATES:8\n RARITIES:8 RATITES:7 RATTIEST:8 REITERATES:10 SATIRE:6 SATIRES:7 SATIRISE:8 SATIRISES:9 SERIATE:7\n SERIATES:8 SESTERTIA:9 STARRIER:8 STARRIEST:9 STRAITER:8 STRAITEST:9 STRIAE:6 STRIATE:7 STRIATES:8\n TARRIERS:8 TARRIES:7 TARRIEST:8 TARSIER:7 TARSIERS:8 TASTIER:7 TEARIEST:8 TERAIS:6 TERTIARIES:10\n TITRATES:8 TRAITRESS:9 TRAITRESSES:11 TREATIES:8 TREATISE:8 TREATISES:9 
TRISTATE:8\n AENRST 336 points 40 words ANTEATERS:9 ANTRES:6 ARRESTANT:9 ARRESTANTS:10 ARSENATE:8 ARSENATES:9 ASSENTER:8\n ASSENTERS:9 ASTERN:6 EARNEST:7 EARNESTNESS:11 EARNESTNESSES:13 EARNESTS:8 EASTERN:7 EASTERNER:9\n EASTERNERS:10 ENTRANTS:8 ENTREATS:8 ERRANTS:7 NARRATERS:9 NARRATES:8 NATTERS:7 NEAREST:7 RANTERS:7\n RATTEENS:8 RATTENERS:9 RATTENS:7 REENTRANTS:10 RETREATANTS:11 SARSENET:8 SARSENETS:9 SERENATA:8\n SERENATAS:9 SERENATE:8 STERNA:6 TANNERS:7 TARANTASES:10 TARTNESS:8 TARTNESSES:10 TERRANES:8\n EINRST 582 points 70 words ENTERITIS:9 ENTERITISES:11 ENTIRENESS:10 ENTIRENESSES:12 ENTIRES:7 ENTIRETIES:10\n ENTRIES:7 ESTRIN:6 ESTRINS:7 ETERNISE:8 ETERNISES:9 ETERNITIES:10 INERTNESS:9 INERTNESSES:11 INERTS:6\n INSERT:6 INSERTER:8 INSERTERS:9 INSERTS:7 INSETTER:8 INSETTERS:9 INSISTER:8 INSISTERS:9 INTENSER:8\n INTEREST:8 INTERESTS:9 INTERNEES:9 INTERNES:8 INTERNIST:9 INTERNISTS:10 INTERNS:7 INTERS:6 INTERTIES:9\n NITERIES:8 NITERS:6 NITRES:6 NITRITES:8 REENTRIES:9 REINSERT:8 REINSERTS:9 REINTERS:8 RENTIERS:8\n RETINENES:9 RETINES:7 RETINITES:9 RETINITIS:9 RETINTS:7 SENTRIES:8 SERENITIES:10 SINISTER:8\n SINISTERNESS:12 SINISTERNESSES:14 SINTER:6 SINTERS:7 STERNITE:8 STERNITES:9 STINTER:7 STINTERS:8\n TEENSIER:8 TEENTSIER:9 TERRINES:8 TINNERS:7 TINTERS:7 TRIENES:7 TRIENS:6 TRIENTES:8 TRINES:6\n TRINITIES:9 TRITENESS:9 TRITENESSES:11\n AEINR 19 points 3 words INANER:6 NARINE:6 RAINIER:7\n AEINS 129 points 17 words ANISE:5 ANISES:6 ASININE:7 EASINESS:8 EASINESSES:10 INANENESS:9 INANENESSES:11\n INANES:6 INSANE:6 INSANENESS:10 INSANENESSES:12 NANNIES:7 SANIES:6 SANSEI:6 SANSEIS:7 SIENNA:6\n SIENNAS:7\n AEINT 64 points 10 words ENTIA:5 INITIATE:8 INNATE:6 TAENIA:6 TAENIAE:7 TENIA:5 TENIAE:6 TINEA:5 TITANATE:8\n TITANITE:8\n AEIRS 106 points 17 words AERIES:6 AIRERS:6 ARISE:5 ARISES:6 ARRISES:7 EASIER:6 RAISE:5 RAISER:6 RAISERS:7\n RAISES:6 RERAISE:7 RERAISES:8 SASSIER:7 SERAI:5 SERAIS:6 SIERRA:6 SIERRAS:7\n AEIRT 135 points 20 words ARIETTA:7 ARIETTE:7 ARTIER:6 ATTIRE:6 ATTRITE:7 IRATE:5 IRATER:6 IRRITATE:8\n ITERATE:7 RATITE:6 RATTIER:7 REITERATE:9 RETIA:5 RETIARII:8 TARRIER:7 TATTIER:7 TEARIER:7 TERAI:5\n TERRARIA:8 TITRATE:7\n AEIST 112 points 15 words EASIEST:7 ETATIST:7 SASSIEST:8 SATIATE:7 SATIATES:8 SATIETIES:9 SIESTA:6 SIESTAS:7\n STEATITE:8 STEATITES:9 TASSIE:6 TASSIES:7 TASTIEST:8 TATTIES:7 TATTIEST:8\n AENRS 172 points 25 words ANEARS:6 ARENAS:6 EARNERS:7 EARNS:5 ENSNARE:7 ENSNARER:8 ENSNARERS:9 ENSNARES:8\n NARES:5 NEARNESS:8 NEARNESSES:10 NEARS:5 RANEES:6 RARENESS:8 RARENESSES:10 REEARNS:7 RENNASE:7\n RENNASES:8 SANER:5 SARSEN:6 SARSENS:7 SNARE:5 SNARER:6 SNARERS:7 SNARES:6\n AENRT 132 points 19 words ANTEATER:8 ANTRE:5 ENTERA:6 ENTRANT:7 ENTREAT:7 ERRANT:6 NARRATE:7 NARRATER:8\n NATTER:6 NEATER:6 RANTER:6 RATTEEN:7 RATTEN:6 RATTENER:8 REENTRANT:9 RETREATANT:10 TANNER:6 TERNATE:7\n TERRANE:7\n AENST 217 points 32 words ANATASE:7 ANATASES:8 ANENST:6 ANNATES:7 ANSATE:6 ANTENNAS:8 ANTES:5 ASSENT:6\n ASSENTS:7 ENATES:6 ENTASES:7 ETNAS:5 NATES:5 NEATENS:7 NEATEST:7 NEATNESS:8 NEATNESSES:10 NEATS:5\n SANEST:6 SATEEN:6 SATEENS:7 SENATE:6 SENATES:7 SENSATE:7 SENSATES:8 SETENANT:8 SETENANTS:9 STANE:5\n STANES:6 TANNATES:8 TANNEST:7 TENANTS:7\n AERST 604 points 85 words AERATES:7 ARETES:6 ARREST:6 ARRESTEE:8 ARRESTEES:9 ARRESTER:8 ARRESTERS:9\n ARRESTS:7 ASSERT:6 ASSERTER:8 ASSERTERS:9 ASSERTS:7 ASTER:5 ASTERS:6 ATTESTER:8 ATTESTERS:9 EASTER:6\n EASTERS:7 EATERS:6 ERRATAS:7 ESTERASE:8 ESTERASES:9 ESTREAT:7 ESTREATS:8 RAREST:6 RASTER:6 RASTERS:7\n RATERS:6 RATES:5 RATTERS:7 REARREST:8 
REARRESTS:9 REASSERT:8 REASSERTS:9 REATAS:6 RESEAT:6 RESEATS:7\n RESTART:7 RESTARTS:8 RESTATE:7 RESTATES:8 RETASTE:7 RETASTES:8 RETEARS:7 RETREATERS:10 RETREATS:8\n SEAREST:7 SEATER:6 SEATERS:7 SERRATE:7 SERRATES:8 STARE:5 STARER:6 STARERS:7 STARES:6 STARETS:7\n STARTER:7 STARTERS:8 STATER:6 STATERS:7 STEARATE:8 STEARATES:9 STRASSES:8 STRETTA:7 STRETTAS:8 TARES:5\n TARRES:6 TARTEST:7 TARTRATES:9 TASTER:6 TASTERS:7 TATERS:6 TATTERS:7 TEARERS:7 TEARS:5 TEASER:6\n TEASERS:7 TERRAS:6 TERRASES:8 TESSERA:7 TESSERAE:8 TETRAS:6 TRASSES:7 TREATERS:8 TREATS:6\n EINRS 184 points 29 words EERINESS:8 EERINESSES:10 ESERINE:7 ESERINES:8 INNERS:6 NEREIS:6 REINS:5 RENINS:6\n RENNINS:7 RERISEN:7 RESIN:5 RESINS:6 RINSE:5 RINSER:6 RINSERS:7 RINSES:6 RISEN:5 SEINER:6 SEINERS:7\n SEREIN:6 SEREINS:7 SERIN:5 SERINE:6 SERINES:7 SERINS:6 SINNER:6 SINNERS:7 SIREN:5 SIRENS:6\n EINRT 190 points 29 words ENTIRE:6 INERT:5 INTER:5 INTERN:6 INTERNE:7 INTERNEE:8 INTERTIE:8 NETTIER:7\n NITER:5 NITERIE:7 NITRE:5 NITRITE:7 NITTIER:7 REINTER:7 RENITENT:8 RENTIER:7 RETINE:6 RETINENE:8\n RETINITE:8 RETINT:6 TEENIER:7 TENTIER:7 TERRINE:7 TINIER:6 TINNER:6 TINNIER:7 TINTER:6 TRIENE:6\n TRINE:5\n EINST 469 points 58 words EINSTEIN:8 EINSTEINS:9 ENTITIES:8 INSENTIENT:10 INSET:5 INSETS:6 INSISTENT:9\n INTENSE:7 INTENSENESS:11 INTENSENESSES:13 INTENSEST:9 INTENSITIES:11 INTENTNESS:10 INTENTNESSES:12\n INTENTS:7 INTESTINE:9 INTESTINES:10 INTINES:7 NEIST:5 NETTIEST:8 NINETEENS:9 NINETIES:8 NITES:5\n NITTIEST:8 SENITI:6 SENNIT:6 SENNITS:7 SENSITISE:9 SENSITISES:10 SENTI:5 SENTIENT:8 SENTIENTS:9\n SESTINE:7 SESTINES:8 SIENITE:7 SIENITES:8 SITTEN:6 STEIN:5 STEINS:6 TEENIEST:8 TEENSIEST:9\n TEENTSIEST:10 TENNIES:7 TENNIS:6 TENNISES:8 TENNIST:7 TENNISTS:8 TENSITIES:9 TENTIEST:8 TESTINESS:9\n TESTINESSES:11 TINES:5 TINIEST:7 TININESS:8 TININESSES:10 TINNIEST:8 TINNINESS:9 TINNINESSES:11\n EIRST 262 points 38 words EERIEST:7 IRITISES:8 RESIST:6 RESISTER:8 RESISTERS:9 RESISTS:7 RESITE:6 RESITES:7\n RETIES:6 RETIREES:8 RETIRERS:8 RETIRES:7 RETRIES:7 RITES:5 RITTERS:7 SISTER:6 SISTERS:7 SITTER:6\n SITTERS:7 STIRRER:7 STIRRERS:8 STRETTI:7 TERRIERS:8 TERRIES:7 TERRITS:7 TESTIER:7 TIERS:5 TIRES:5\n TITERS:6 TITRES:6 TITTERERS:9 TITTERS:7 TRESSIER:8 TRESSIEST:9 TRIERS:6 TRIES:5 TRISTE:6 TRITEST:7\n ENRST 246 points 35 words ENTERERS:8 ENTERS:6 ENTREES:7 NERTS:5 NESTER:6 NESTERS:7 NETTERS:7 REENTERS:8\n RENEST:6 RENESTS:7 RENNETS:7 RENTERS:7 RENTES:6 RENTS:5 RESENT:6 RESENTS:7 RETENES:7 SERENEST:8\n STERN:5 STERNER:7 STERNEST:8 STERNNESS:9 STERNNESSES:11 STERNS:6 TEENERS:7 TENNERS:7 TENSER:6\n TENTERS:7 TERNES:6 TERNS:5 TERREENS:8 TERRENES:8 TERSENESS:9 TERSENESSES:11 TREENS:6\n AEIN 11 points 2 words INANE:5 NANNIE:6\n AEIR 22 points 4 words AERIE:5 AERIER:6 AIRER:5 AIRIER:6\n AEIS 13 points 2 words EASIES:6 SASSIES:7\n AEIT 6 points 1 word TATTIE:6\n AENR 40 points 9 words ANEAR:5 ARENA:5 EARN:1 EARNER:6 NEAR:1 NEARER:6 RANEE:5 REEARN:6 RERAN:5\n AENS 46 points 9 words ANES:1 ANSAE:5 SANE:1 SANENESS:8 SANENESSES:10 SANES:5 SENNA:5 SENNAS:6 SENSA:5\n AENT 63 points 13 words ANENT:5 ANTAE:5 ANTE:1 ANTENNA:7 ANTENNAE:8 ATTENT:6 EATEN:5 ENATE:5 ETNA:1 NEAT:1\n NEATEN:6 TANNATE:7 TENANT:6\n AERS 121 points 26 words AREAS:5 ARES:1 ARREARS:7 ARSE:1 ARSES:5 EARS:1 ERAS:1 ERASE:5 ERASER:6 ERASERS:7\n ERASES:6 RARES:5 RASE:1 RASER:5 RASERS:6 RASES:5 REARERS:7 REARS:5 REASSESS:8 REASSESSES:10 SAREE:5\n SAREES:6 SEAR:1 SEARER:6 SEARS:5 SERA:1\n AERT 127 points 24 words AERATE:6 ARETE:5 EATER:5 ERRATA:6 RATE:1 RATER:5 RATTER:6 REATA:5 RETEAR:6\n RETREAT:7 
RETREATER:9 TARE:1 TARRE:5 TARTER:6 TARTRATE:8 TATER:5 TATTER:6 TEAR:1 TEARER:6 TERRA:5\n TERRAE:6 TETRA:5 TREAT:5 TREATER:7\n AEST 164 points 35 words ASSET:5 ASSETS:6 ATES:1 ATTEST:6 ATTESTS:7 EAST:1 EASTS:5 EATS:1 ESTATE:6\n ESTATES:7 ETAS:1 SATE:1 SATES:5 SEAT:1 SEATS:5 SETA:1 SETAE:5 STASES:6 STATE:5 STATES:6 TASSE:5\n TASSES:6 TASSET:6 TASSETS:7 TASTE:5 TASTES:6 TATES:5 TEAS:1 TEASE:5 TEASES:6 TEATS:5 TESTA:5 TESTAE:6\n TESTATE:7 TESTATES:8\n EINR 17 points 4 words INNER:5 REIN:1 RENIN:5 RENNIN:6\n EINS 53 points 10 words NINES:5 NINNIES:7 NISEI:5 NISEIS:6 SEINE:5 SEINES:6 SEISIN:6 SEISINS:7 SINE:1\n SINES:5\n EINT 28 points 6 words INTENT:6 INTINE:6 NINETEEN:8 NITE:1 TENTIE:6 TINE:1\n EIRS 101 points 20 words IRES:1 IRISES:6 REIS:1 RERISE:6 RERISES:7 RISE:1 RISER:5 RISERS:6 RISES:5 SEISER:6\n SEISERS:7 SERIES:6 SERRIES:7 SIRE:1 SIREE:5 SIREES:6 SIRES:5 SIRREE:6 SIRREES:7 SISSIER:7\n EIRT 87 points 17 words RETIE:5 RETIRE:6 RETIREE:7 RETIRER:7 RITE:1 RITTER:6 TERRIER:7 TERRIT:6 TIER:1\n TIRE:1 TITER:5 TITRE:5 TITTER:6 TITTERER:8 TRIER:5 TRITE:5 TRITER:6\n EIST 41 points 8 words SISSIEST:8 SITE:1 SITES:5 STIES:5 TESTIEST:8 TESTIS:6 TIES:1 TITTIES:7\n ENRS 80 points 12 words ERNES:5 ERNS:1 RESEEN:6 SERENE:6 SERENENESS:10 SERENENESSES:12 SERENER:7 SERENES:7\n SNEER:5 SNEERER:7 SNEERERS:8 SNEERS:6\n ENRT 104 points 19 words ENTER:5 ENTERER:7 ENTREE:6 ETERNE:6 NETTER:6 REENTER:7 RENNET:6 RENT:1 RENTE:5\n RENTER:6 RETENE:6 TEENER:6 TENNER:6 TENTER:6 TERN:1 TERNE:5 TERREEN:7 TERRENE:7 TREEN:5\n ENST 94 points 18 words ENTENTES:8 NEST:1 NESTS:5 NETS:1 NETTS:5 SENNET:6 SENNETS:7 SENT:1 SENTE:5 TEENS:5\n TENETS:6 TENS:1 TENSE:5 TENSENESS:9 TENSENESSES:11 TENSES:6 TENSEST:7 TENTS:5\n ERST 266 points 44 words ERST:1 ESTER:5 ESTERS:6 REEST:5 REESTS:6 RESET:5 RESETS:6 RESETTER:8 RESETTERS:9\n REST:1 RESTER:6 RESTERS:7 RESTRESS:8 RESTRESSES:10 RESTS:5 RETEST:6 RETESTS:7 RETS:1 SEREST:6 SETTER:6\n SETTERS:7 STEER:5 STEERER:7 STEERERS:8 STEERS:6 STERE:5 STERES:6 STREET:6 STREETS:7 STRESS:6\n STRESSES:8 STRETTE:7 TEETERS:7 TERRETS:7 TERSE:5 TERSER:6 TERSEST:7 TESTER:6 TESTERS:7 TETTERS:7\n TREES:5 TRESS:5 TRESSES:7 TRETS:5\n AER 25 points 7 words AREA:1 AREAE:5 ARREAR:6 RARE:1 RARER:5 REAR:1 REARER:6\n AES 33 points 8 words ASEA:1 ASSES:5 ASSESS:6 ASSESSES:8 EASE:1 EASES:5 SASSES:6 SEAS:1\n AET 2 points 2 words TATE:1 TEAT:1\n EIN 1 point 1 word NINE:1\n EIR 11 points 2 words EERIE:5 EERIER:6\n EIS 35 points 7 words ISSEI:5 ISSEIS:6 SEIS:1 SEISE:5 SEISES:6 SISES:5 SISSIES:7\n EIT 6 points 1 word TITTIE:6\n ENR 1 point 1 word ERNE:1\n ENS 20 points 6 words NESS:1 NESSES:6 SEEN:1 SENE:1 SENSE:5 SENSES:6\n ENT 15 points 5 words ENTENTE:7 NETT:1 TEEN:1 TENET:5 TENT:1\n ERS 52 points 13 words ERRS:1 ERSES:5 REES:1 RESEE:5 RESEES:6 SEER:1 SEERESS:7 SEERESSES:9 SEERS:5 SERE:1\n SERER:5 SERES:5 SERS:1\n ERT 27 points 7 words RETE:1 TEETER:6 TERETE:6 TERRET:6 TETTER:6 TREE:1 TRET:1\n EST 79 points 18 words SESTET:6 SESTETS:7 SETS:1 SETT:1 SETTEE:6 SETTEES:7 SETTS:5 STET:1 STETS:5 TEES:1\n TEST:1 TESTEE:6 TESTEES:7 TESTES:6 TESTS:5 TETS:1 TSETSE:6 TSETSES:7\n EN 1 point 1 word NENE:1\n ES 7 points 3 words ESES:1 ESSES:5 SEES:1\n"
]
],
[
[
"Yes it does (roughly) double the score!\n\n# Summary\n\nThis notebook showed how to find the highest-scoring honeycomb. Thanks to a series of ideas, we were able to achieve a substantial reduction in the number of honeycombs that need to be examined (a factor of 400), the run time needed for `game_score` (a factor of about 200), and the overall run time (a factor of about 70,000).\n\n- **Brute Force Enumeration** (3,364,900 honeycombs; 10 hours (estimate) run time)<br>Try every possible honeycomb.\n- **Pangram Lettersets** (55,902 honeycombs; 10 minutes (estimate) run time)<br>Try just the honeycombs that are pangram lettersets (with every center).\n- **Points Table** (55,902 honeycombs; under 2 seconds run time)<br>Precompute the score for each letterset, and sum the 64 letter subsets of each honeycomb.\n- **Branch and Bound** (8,084 honeycombs; under 1/2 second run time)<br>Try every center only for lettersets that score better than the best score so far.\n\n\n\nHere are pictures for the highest-scoring honeycombs, with and without an S:\n\n<img src=\"http://norvig.com/honeycombs.png\" width=\"350\">\n<center>\n 537 words; 3,898 points 1,179 words; 8,681 points\n <br>\n</center>",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a199efa3033642331eab686c380c266d71f403a
| 715,700 |
ipynb
|
Jupyter Notebook
|
old_notebooks/demo_3_marginal_gaussianization.ipynb
|
IPL-UV/rb
|
092d78a0ea5f9670c5cd4f70ff054ec58ff309af
|
[
"MIT"
] | 6 |
2020-10-14T08:35:29.000Z
|
2022-02-18T23:26:30.000Z
|
old_notebooks/demo_3_marginal_gaussianization.ipynb
|
jejjohnson/rbig
|
6d25401b32b318894dfcb594edde076356f73324
|
[
"MIT"
] | 11 |
2020-10-08T10:02:38.000Z
|
2021-03-26T16:00:41.000Z
|
old_notebooks/demo_3_marginal_gaussianization.ipynb
|
IPL-UV/rbig
|
092d78a0ea5f9670c5cd4f70ff054ec58ff309af
|
[
"MIT"
] | null | null | null | 723.660263 | 59,340 | 0.951474 |
[
[
[
"# Marginal Gaussianization\n\n* Author: J. Emmanuel Johnson\n* Email: [email protected]\n\nIn this demonstration, we will show how we can do the marginal Gaussianization on a 2D dataset using the Histogram transformation and Inverse CDF Gaussian distribution. \n\n",
"_____no_output_____"
]
],
[
[
"import os, sys\ncwd = os.getcwd()\n# sys.path.insert(0, f\"{cwd}/../\")\nsys.path.insert(0, \"/home/emmanuel/code/rbig\")\n\nfrom rbig.data import ToyData\nfrom rbig.transform.gaussianization import MarginalGaussianization\n# from rbig.transform.gaussianization import HistogramGaussianization, KDEGaussianization\nfrom rbig.transform import InverseGaussCDF\n\nimport numpy as np\nfrom scipy import stats\n\n# Plot Functions\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nsns.reset_defaults()\n#sns.set_style('whitegrid')\n#sns.set_context('talk')\nsns.set_context(context='talk',font_scale=0.7)\n%matplotlib inline\n\n%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
]
],
[
[
"## Data\n\nFor this example, we are looking at a 2D dataset.",
"_____no_output_____"
]
],
[
[
"def plot_2d_joint(data, color='blue', title='Original Data'):\n \n fig = plt.figure(figsize=(5, 5))\n\n g = sns.jointplot(x=data[:, 0], y=data[:, 1], kind='hex', color=color)\n plt.xlabel('X')\n plt.ylabel('Y')\n plt.suptitle(title)\n plt.tight_layout()\n plt.show()\n \ndef plot_prob(data, probs, title='Probabilities'):\n \n fig, ax = plt.subplots()\n\n h = ax.scatter(data[:, 0], data[:, 1], s=1, c=probs, cmap='Reds')\n\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n cbar = plt.colorbar(h, )\n ax.set_title(title)\n plt.show()",
"_____no_output_____"
],
[
"seed = 123\nrng = np.random.RandomState(seed=seed)\ndataset = 'rbig'\nn_samples = 10_000\nn_features = 2\nnoise = 0.25\nrandom_state=1\nclusters = 2\n\n\ndata = ToyData(\n dataset=dataset, \n n_samples=n_samples, \n n_features=n_features, \n noise=noise, \n random_state=random_state,\n clusters=clusters,\n).generate_samples()\n\nX = data[:, 0]\nY = data[:, 1]\n\nplot_2d_joint(data, title='Original Data')",
"_____no_output_____"
]
],
[
[
"## Uniformization Transformation",
"_____no_output_____"
]
],
[
[
"from rbig.transform.uniformization import HistogramUniformization, KDEUniformization, MarginalUniformization\n# from rbig.density.histogram import ScipyHistogram, QuantileHistogram\n# from rbig.den",
"_____no_output_____"
]
],
[
[
"#### Initialize Uniformization Algorithm",
"_____no_output_____"
]
],
[
[
"# INITIALIZE UNIFORMIZATION ALGORITHM\n#===\n# uniform_clf = HistogramUniformization(bins=100, support_extension=10, alpha=1e-4, n_quantiles=None)\nuniform_clf = KDEUniformization(n_quantiles=50, method='fft')\n# density_clf = KDEScipy(n_quantiles=50, bw_method='scott', support_extension=10)\n# density_clf = KDESklearn(n_quantiles=100, support_extension=10)",
"_____no_output_____"
]
],
[
[
"#### Add it to Marginal Transformation Algorithm",
"_____no_output_____"
]
],
[
[
"mg_uniformizer = MarginalUniformization(uniform_clf)\n\n\nmg_uniformizer.fit(data)",
"_____no_output_____"
],
[
"X_trans = mg_uniformizer.transform(data)\n\nplot_2d_joint(X_trans, title='Transformed Data')",
"_____no_output_____"
],
[
"data_approx = mg_uniformizer.inverse_transform(X_trans)\n\nplot_2d_joint(data_approx, title='Transformed Data')",
"_____no_output_____"
],
[
"X_ldj = mg_uniformizer.log_abs_det_jacobian(data)\n\nplot_2d_joint(X_ldj, title='Transformed Data')\nplot_2d_joint(np.exp(X_ldj), title='Transformed Data')",
"_____no_output_____"
],
[
"plot_prob(data, X_ldj.sum(-1), title='Log Probabilities')\nplot_prob(data, np.exp(X_ldj.sum(-1)), title='Probabilities')",
"_____no_output_____"
]
],
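[
[
"As a quick sanity check (my addition): a marginally uniformized variable should live in the unit interval, so the per-dimension range of `X_trans` should be close to [0, 1].",
"_____no_output_____"
]
],
[
[
"# Eyeball the range of the uniformized data; values should sit within (or very near) [0, 1].\nprint('min:', X_trans.min(axis=0))\nprint('max:', X_trans.max(axis=0))",
"_____no_output_____"
]
],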
[
[
"## Marginal Gaussinization",
"_____no_output_____"
]
],
[
[
"from rbig.transform.uniformization import HistogramUniformization, KDEUniformization, MarginalUniformization\nfrom rbig.transform.gaussianization import MarginalGaussianization",
"_____no_output_____"
],
[
"uniform_clf = HistogramUniformization(bins=100, support_extension=10, alpha=1e-4, n_quantiles=None)\nuniform_clf = KDEUniformization(n_quantiles=50, method='fft', )\n\nmg_gaussianizer = MarginalGaussianization(uniform_clf)\n\n\nmg_gaussianizer.fit(data)",
"_____no_output_____"
],
[
"X_trans = mg_gaussianizer.transform(data)\n\nplot_2d_joint(X_trans, title='Transformed Data')",
"_____no_output_____"
],
[
"data_approx = mg_gaussianizer.inverse_transform(X_trans)\n\nplot_2d_joint(data_approx, title='Transformed Data')",
"_____no_output_____"
],
[
"X_ldj = mg_gaussianizer.log_abs_det_jacobian(data)\n\nplot_2d_joint(X_ldj, title='Transformed Data')\nplot_2d_joint(np.exp(X_ldj), title='Transformed Data')",
"_____no_output_____"
],
[
"X_lprob = mg_gaussianizer.score_samples(data)\n\nplot_prob(data, X_lprob, title='Log Probabilities')\nplot_prob(data, np.exp(X_lprob), title='Probabilities')",
"_____no_output_____"
]
],
[
[
"### Negative Log Likelihood",
"_____no_output_____"
]
],
[
[
"X_nll = mg_gaussianizer.score(data,)\n\nprint(f\"Negative Log-Likelihood Score: {X_nll:.4f}\")",
"Negative Log-Likelihood Score: -2.8415\n"
]
],
[
[
"## Marginal Histogram Transformation\n\nSo, for this transformation, we are going to transform our data from the current distribution to a marginally Gaussian distribution and then perform a rotation. In theory, if we do enough of these, we will eventually convert to a Gaussian distribution.",
"_____no_output_____"
]
],
[
[
"# parameters\nnbins = 1_000 # number of bins to do the histogram transform\nalpha = 1e-05 # adds some regularization (noise)\nsupport_extension = 10\n\n\n# initialize the transformer\nmg_transformer = HistogramGaussianization(\n nbins=nbins,\n alpha=alpha\n)\n\n# fit the transformer to the data\nmg_transformer.fit(data);",
"_____no_output_____"
]
],
[
[
"### 1. Forward Transformation\n\nFor this transformation, we will be applying the following:\n\n$$\\Psi(\\mathbf{x}) = \\Phi^{-1}(\\mathbf{x})$$\n\nwhere $\\Phi^{-1}(\\cdot)$ is the inverse CDF of the Gaussian distribution.",
"_____no_output_____"
]
],
[
[
"data_trans = mg_transformer.transform(data)\n\nplot_2d_joint(data_trans, title='Transformed Data')",
"_____no_output_____"
]
],
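[
[
"A complementary numerical check (my addition): if the marginals are now standard normal, each dimension should have a mean near 0 and a standard deviation near 1.",
"_____no_output_____"
]
],
[
[
"# Per-dimension sample moments of the Gaussianized data.\nfor i in range(data_trans.shape[1]):\n    print(f'dim {i}: mean={data_trans[:, i].mean():.3f}, std={data_trans[:, i].std():.3f}')",
"_____no_output_____"
]
],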
[
[
"So clearly we can see that the transformation works. Both of the marginals are Gaussian distributed..",
"_____no_output_____"
],
[
"### 2. Inverse Transformation\n\nFor this step, we will apply the inverse transformation:\n\n$$\\Psi^{-1}(\\mathbf{x}) = \\Phi \\left( \\mathbf{x} \\right)$$\n\nwhere $\\Phi(\\cdot)$ is the CDF of the Gaussian distribution.",
"_____no_output_____"
]
],
[
[
"data_approx = mg_transformer.inverse_transform(data_trans)\n\n# check that its more or less equal\nnp.testing.assert_array_almost_equal(data_approx, data, decimal=1e-5)",
"_____no_output_____"
]
],
[
[
"We see that this transformation is very close to the original. In fact, it's close to approximately 1e-5 decimal places. The errors will definitely stem from the boundaries.",
"_____no_output_____"
]
],
[
[
"# Plot results\nplot_2d_joint(data_approx, title='Inverse Transformed Data')",
"_____no_output_____"
]
],
[
[
"## Log Absolute Determinant Jacobian\n\nUsing the derivative of inverse-functions theorem, we can calculate the derivative like so:\n\n$$\\nabla_\\mathbf{x} \\Phi^{-1}(\\mathbf{x}) = \\frac{1}{\\phi (\\Phi^{-1} (x)) }$$\n\nwhere $\\phi(\\cdot)$ is the PDF of the Gaussian distribution. Taking the log of these terms gives us:\n\n$$ \\log \\nabla_\\mathbf{x} \\Phi^{-1}(\\mathbf{x}) = - \\log \\phi (\\Phi^{-1} (x))$$\n\n",
"_____no_output_____"
]
],
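[
[
"Before using the transformer's version, we can sanity-check this derivative identity numerically with `scipy.stats` (my addition; `stats` was imported at the top of the notebook):",
"_____no_output_____"
]
],
[
[
"# Finite-difference check of d/du Phi^{-1}(u) = 1 / phi(Phi^{-1}(u)) at a single point.\nu, eps = 0.3, 1e-6\nfd = (stats.norm.ppf(u + eps) - stats.norm.ppf(u - eps)) / (2 * eps)\nanalytic = 1.0 / stats.norm.pdf(stats.norm.ppf(u))\nprint(fd, analytic)",
"_____no_output_____"
]
],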
[
[
"X_slogdet = mg_transformer.log_abs_det_jacobian(data)\n\nprint(X_slogdet.min(), X_slogdet.max())\nprint(np.exp(X_slogdet).min(), np.exp(X_slogdet).max())",
"_____no_output_____"
],
[
"# plot the gradients\nplot_2d_joint(np.exp(X_slogdet), title='Jacobian Data')",
"_____no_output_____"
]
],
[
[
"## Log Probability\n\n$$\\log p_\\theta(\\mathbf{x}) = \\log p_\\theta \\left( \\mathbf{z} \\right) + \\log \\left| \\nabla_\\mathbf{x} \\mathbf{z} \\right|$$\n\nwhere $\\mathbf{z} = \\Psi(\\mathbf{x})$\n",
"_____no_output_____"
]
],
[
[
"# score samples \nlog_prob = mg_transformer.score_samples(data)",
"_____no_output_____"
],
[
"# score samples \nlog_prob = mg_transformer.score_samples(data)\n\nplot_prob(data, log_prob, title='Log Probabilities')",
"_____no_output_____"
]
],
[
[
"## Probability\n\nThis is the same as above but without the log scale:\n\n$$p_\\theta(\\mathbf{x}) = p_\\theta \\left( \\mathbf{z} \\right) \\left| \\nabla_\\mathbf{x} \\mathbf{z} \\right|$$\n\nwhere $\\mathbf{z} = \\Psi(\\mathbf{x})$",
"_____no_output_____"
]
],
[
[
"plot_prob(data, np.exp(log_prob), title='Probabilities')",
"_____no_output_____"
]
],
[
[
"## Negative Log-Likelihood\n\nWe need to take the expected value (mean) of all log probabilities.\n\n$$\\text{nll} = \\frac{1}{N} \\sum_{n=1}^{N} \\log p_\\theta(\\mathbf{x})$$",
"_____no_output_____"
]
],
[
[
"score = mg_transformer.score(data)\n\nprint(f\"Negative Log-Likelihood Score: {score:.4f}\")",
"Negative Log-Likelihood Score: -2.0724\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a199f091869aadb3866d55fcb908c0cba33435f
| 58,390 |
ipynb
|
Jupyter Notebook
|
notebooks/autodiff_cookbook.ipynb
|
QBatista/jax
|
5edb23679f2605654949156da84e330205840695
|
[
"ECL-2.0",
"Apache-2.0"
] | 2 |
2021-06-16T15:02:03.000Z
|
2021-06-16T22:53:02.000Z
|
notebooks/autodiff_cookbook.ipynb
|
xbfibe/jax
|
9788a3584a458750792cd91e550bf2b8f802a4b6
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
notebooks/autodiff_cookbook.ipynb
|
xbfibe/jax
|
9788a3584a458750792cd91e550bf2b8f802a4b6
|
[
"ECL-2.0",
"Apache-2.0"
] | 1 |
2021-03-19T17:22:48.000Z
|
2021-03-19T17:22:48.000Z
| 37.525707 | 707 | 0.526648 |
[
[
[
"!pip install -q --upgrade jax jaxlib",
"_____no_output_____"
],
[
"from __future__ import print_function, division\nimport jax.numpy as np\nfrom jax import grad, jit, vmap\nfrom jax import random\n\nkey = random.PRNGKey(0)",
"/usr/local/lib/python2.7/dist-packages/jax/lib/xla_bridge.py:122: UserWarning: No GPU found, falling back to CPU.\n warnings.warn('No GPU found, falling back to CPU.')\n"
]
],
[
[
"# The Autodiff Cookbook\n\n*alexbw@, mattjj@* \n\nJAX has a pretty general automatic differentiation system. In this notebook, we'll go through a whole bunch of neat autodiff ideas that you can cherry pick for your own work, starting with the basics.",
"_____no_output_____"
],
[
"## Gradients",
"_____no_output_____"
],
[
"### Starting with `grad`\n\nYou can differentiate a function with `grad`:",
"_____no_output_____"
]
],
[
[
"grad_tanh = grad(np.tanh)\nprint(grad_tanh(2.0))",
"0.070650816\n"
]
],
[
[
"`grad` takes a function and returns a function. If you have a Python function `f` that evaluates the mathematical function $f$, then `grad(f)` is a Python function that evaluates the mathematical function $\\nabla f$. That means `grad(f)(x)` represents the value $\\nabla f(x)$.\n\nSince `grad` operates on functions, you can apply it to its own output to differentiate as many times as you like:",
"_____no_output_____"
]
],
[
[
"print(grad(grad(np.tanh))(2.0))\nprint(grad(grad(grad(np.tanh)))(2.0))",
"-0.13621867\n0.25265405\n"
]
],
[
[
"Let's look at computing gradients with `grad` in a linear logistic regression model. First, the setup:",
"_____no_output_____"
]
],
[
[
"def sigmoid(x):\n return 0.5 * (np.tanh(x / 2) + 1)\n\n# Outputs probability of a label being true.\ndef predict(W, b, inputs):\n return sigmoid(np.dot(inputs, W) + b)\n\n# Build a toy dataset.\ninputs = np.array([[0.52, 1.12, 0.77],\n [0.88, -1.08, 0.15],\n [0.52, 0.06, -1.30],\n [0.74, -2.49, 1.39]])\ntargets = np.array([True, True, False, True])\n\n# Training loss is the negative log-likelihood of the training examples.\ndef loss(W, b):\n preds = predict(W, b, inputs)\n label_probs = preds * targets + (1 - preds) * (1 - targets)\n return -np.sum(np.log(label_probs))\n\n# Initialize random model coefficients\nkey, W_key, b_key = random.split(key, 3)\nW = random.normal(W_key, (3,))\nb = random.normal(b_key, ())",
"_____no_output_____"
]
],
[
[
"Use the `grad` function with its `argnums` argument to differentiate a function with respect to positional arguments.",
"_____no_output_____"
]
],
[
[
"# Differentiate `loss` with respect to the first positional argument:\nW_grad = grad(loss, argnums=0)(W, b)\nprint('W_grad', W_grad)\n\n# Since argnums=0 is the default, this does the same thing:\nW_grad = grad(loss)(W, b)\nprint('W_grad', W_grad)\n\n# But we can choose different values too, and drop the keyword:\nb_grad = grad(loss, 1)(W, b)\nprint('b_grad', b_grad)\n\n# Including tuple values\nW_grad, b_grad = grad(loss, (0, 1))(W, b)\nprint('W_grad', W_grad)\nprint('b_grad', b_grad)",
"W_grad [-0.16965586 -0.8774649 -1.4901347 ]\nW_grad [-0.16965586 -0.8774649 -1.4901347 ]\nb_grad -0.2922725\nW_grad [-0.16965586 -0.8774649 -1.4901347 ]\nb_grad -0.2922725\n"
]
],
[
[
"This `grad` API has a direct correspondence to the excellent notation in Spivak's classic *Calculus on Manifolds* (1965), also used in Sussman and Wisdom's [*Structure and Interpretation of Classical Mechanics*](http://mitpress.mit.edu/sites/default/files/titles/content/sicm_edition_2/book.html) (2015) and their [*Functional Differential Geometry*](https://mitpress.mit.edu/books/functional-differential-geometry) (2013). Both books are open-access. See in particular the \"Prologue\" section of *Functional Differential Geometry* for a defense of this notation.\n\nEssentially, when using the `argnums` argument, if `f` is a Python function for evaluating the mathematical function $f$, then the Python expression `grad(f, i)` evaluates to a Python function for evaluating $\\partial_i f$.",
"_____no_output_____"
],
[
"### Differentiating with respect to nested lists, tuples, and dicts",
"_____no_output_____"
],
[
"Differentiating with respect to standard Python containers just works, so use tuples, lists, and dicts (and arbitrary nesting) however you like.",
"_____no_output_____"
]
],
[
[
"def loss2(params_dict):\n preds = predict(params_dict['W'], params_dict['b'], inputs)\n label_probs = preds * targets + (1 - preds) * (1 - targets)\n return -np.sum(np.log(label_probs))\n\nprint(grad(loss2)({'W': W, 'b': b}))",
"{'b': array(-0.2922725, dtype=float32), 'W': array([-0.16965586, -0.8774649 , -1.4901347 ], dtype=float32)}\n"
]
],
[
[
"You can [register your own container types](https://github.com/google/jax/issues/446#issuecomment-467105048) to work with not just `grad` but all the JAX transformations (`jit`, `vmap`, etc.).",
"_____no_output_____"
],
[
"### Evaluate a function and its gradient using `value_and_grad`",
"_____no_output_____"
],
[
"Another convenient function is `value_and_grad` for efficiently computing both a function's value as well as its gradient's value:",
"_____no_output_____"
]
],
[
[
"from jax import value_and_grad\nloss_value, Wb_grad = value_and_grad(loss, (0, 1))(W, b)\nprint('loss value', loss_value)\nprint('loss value', loss(W, b))",
"loss value 3.0519395\nloss value 3.0519395\n"
]
],
[
[
"### Checking against numerical differences\n\nA great thing about derivatives is that they're straightforward to check with finite differences:",
"_____no_output_____"
]
],
[
[
"# Set a step size for finite differences calculations\neps = 1e-4\n\n# Check b_grad with scalar finite differences\nb_grad_numerical = (loss(W, b + eps / 2.) - loss(W, b - eps / 2.)) / eps\nprint('b_grad_numerical', b_grad_numerical)\nprint('b_grad_autodiff', grad(loss, 1)(W, b))\n\n# Check W_grad with finite differences in a random direction\nkey, subkey = random.split(key)\nvec = random.normal(subkey, W.shape)\nunitvec = vec / np.sqrt(np.vdot(vec, vec))\nW_grad_numerical = (loss(W + eps / 2. * unitvec, b) - loss(W - eps / 2. * unitvec, b)) / eps\nprint('W_dirderiv_numerical', W_grad_numerical)\nprint('W_dirderiv_autodiff', np.vdot(grad(loss)(W, b), unitvec))",
"b_grad_numerical -0.29325485\nb_grad_autodiff -0.2922725\nW_dirderiv_numerical -0.19550323\nW_dirderiv_autodiff -0.19909078\n"
]
],
[
[
"JAX provides a simple convenience function that does essentially the same thing, but checks up to any order of differentiation that you like:",
"_____no_output_____"
]
],
[
[
"from jax.test_util import check_grads\ncheck_grads(loss, (W, b), order=2) # check up to 2nd order derivatives",
"_____no_output_____"
]
],
[
[
"### Hessian-vector products with `grad`-of-`grad`\n\nOne thing we can do with higher-order `grad` is build a Hessian-vector product function. (Later on we'll write an even more efficient implementation that mixes both forward- and reverse-mode, but this one will use pure reverse-mode.)\n\nA Hessian-vector product function can be useful in a [truncated Newton Conjugate-Gradient algorithm](https://en.wikipedia.org/wiki/Truncated_Newton_method) for minimizing smooth convex functions, or for studying the curvature of neural network training objectives (e.g. [1](https://arxiv.org/abs/1406.2572), [2](https://arxiv.org/abs/1811.07062), [3](https://arxiv.org/abs/1706.04454), [4](https://arxiv.org/abs/1802.03451)).\n\nFor a scalar-valued function $f : \\mathbb{R}^n \\to \\mathbb{R}$, the Hessian at a point $x \\in \\mathbb{R}^n$ is written as $\\partial^2 f(x)$. A Hessian-vector product function is then able to evaluate\n\n$\\qquad v \\mapsto \\partial^2 f(x) \\cdot v$\n\nfor any $v \\in \\mathbb{R}^n$.\n\nThe trick is not to instantiate the full Hessian matrix: if $n$ is large, perhaps in the millions or billions in the context of neural networks, then that might be impossible to store.\n\nLuckily, `grad` already gives us a way to write an efficient Hessian-vector product function. We just have to use the identity\n\n$\\qquad \\partial^2 f (x) v = \\partial [x \\mapsto \\partial f(x) \\cdot v] = \\partial g(x)$,\n\nwhere $g(x) = \\partial f(x) \\cdot v$ is a new scalar-valued function that dots the gradient of $f$ at $x$ with the vector $v$. Nottice that we're only ever differentiating scalar-valued functions of vector-valued arguments, which is exactly where we know `grad` is efficient.\n\nIn JAX code, we can just write this:",
"_____no_output_____"
]
],
[
[
"def hvp(f, x, v):\n return grad(lambda x: np.vdot(grad(f)(x), v))",
"_____no_output_____"
]
],
[
[
"This example shows that you can freely use lexical closure, and JAX will never get perturbed or confused.\n\nWe'll check this implementation a few cells down, once we see how to compute dense Hessian matrices. We'll also write an even better version that uses both forward-mode and reverse-mode.",
"_____no_output_____"
],
[
"## Jacobians and Hessians using `jacfwd` and `jacrev`",
"_____no_output_____"
],
[
"You can compute full Jacobian matrices using the `jacfwd` and `jacrev` functions:",
"_____no_output_____"
]
],
[
[
"from jax import jacfwd, jacrev\n\n# Isolate the function from the weight matrix to the predictions\nf = lambda W: predict(W, b, inputs)\n\nJ = jacfwd(f)(W)\nprint(\"jacfwd result, with shape\", J.shape)\nprint(J)\n\nJ = jacrev(f)(W)\nprint(\"jacrev result, with shape\", J.shape)\nprint(J)",
"jacfwd result, with shape (4, 3)\n[[ 0.05981753 0.12883775 0.08857596]\n [ 0.04015912 -0.0492862 0.0068453 ]\n [ 0.1218829 0.01406341 -0.30470726]\n [ 0.00140427 -0.00472519 0.00263776]]\njacrev result, with shape (4, 3)\n[[ 0.05981753 0.12883775 0.08857595]\n [ 0.04015912 -0.0492862 0.00684531]\n [ 0.1218829 0.01406341 -0.30470726]\n [ 0.00140427 -0.00472519 0.00263776]]\n"
]
],
[
[
"These two functions compute the same values (up to machine numerics), but differ in their implementation: `jacfwd` uses forward-mode automatic differentiation, which is more efficient for \"tall\" Jacobian matrices, while `jacrev` uses reverse-mode, which is more efficient for \"wide\" Jacobian matrices. For matrices that are near-square, `jacfwd` probably has an edge over `jacrev`.",
"_____no_output_____"
],
[
"You can also use `jacfwd` and `jacrev` with container types:",
"_____no_output_____"
]
],
[
[
"def predict_dict(params, inputs):\n return predict(params['W'], params['b'], inputs)\n\nJ_dict = jacrev(predict_dict)({'W': W, 'b': b}, inputs)\nfor k, v in J_dict.items():\n print(\"Jacobian from {} to logits is\".format(k))\n print(v)",
"Jacobian from b to logits is\n[0.11503371 0.04563536 0.2343902 0.00189767]\nJacobian from W to logits is\n[[ 0.05981753 0.12883775 0.08857595]\n [ 0.04015912 -0.0492862 0.00684531]\n [ 0.1218829 0.01406341 -0.30470726]\n [ 0.00140427 -0.00472519 0.00263776]]\n"
]
],
[
[
"For more details on forward- and reverse-mode, as well as how to implement `jacfwd` and `jacrev` as efficiently as possible, read on!",
"_____no_output_____"
],
[
"Using a composition of two of these functions gives us a way to compute dense Hessian matrices:",
"_____no_output_____"
]
],
[
[
"def hessian(f):\n return jacfwd(jacrev(f))\n\nH = hessian(f)(W)\nprint(\"hessian, with shape\", H.shape)\nprint(H)",
"hessian, with shape (4, 3, 3)\n[[[ 0.02285464 0.04922539 0.03384245]\n [ 0.04922538 0.10602392 0.07289143]\n [ 0.03384245 0.07289144 0.05011286]]\n\n [[-0.03195212 0.03921397 -0.00544638]\n [ 0.03921397 -0.04812624 0.0066842 ]\n [-0.00544638 0.0066842 -0.00092836]]\n\n [[-0.01583708 -0.00182736 0.03959271]\n [-0.00182736 -0.00021085 0.00456839]\n [ 0.03959271 0.00456839 -0.09898178]]\n\n [[-0.00103521 0.00348334 -0.00194452]\n [ 0.00348334 -0.01172098 0.00654304]\n [-0.00194452 0.00654304 -0.00365254]]]\n"
]
],
[
[
"This shape makes sense: if we start with a function $f : \\mathbb{R}^n \\to \\mathbb{R}^m$, then at a point $x \\in \\mathbb{R}^n$ we expect to get the shapes\n* $f(x) \\in \\mathbb{R}^m$, the value of $f$ at $x$,\n* $\\partial f(x) \\in \\mathbb{R}^{m \\times n}$, the Jacobian matrix at $x$,\n* $\\partial^2 f(x) \\in \\mathbb{R}^{m \\times n \\times n}$, the Hessian at $x$,\n\nand so on.\n\nTo implement `hessian`, we could have used `jacrev(jacrev(f))` or `jacrev(jacfwd(f))` or any other composition of the two. But forward-over-reverse is typically the most efficient. That's because in the inner Jacobian computation we're often differentiating a function wide Jacobian (maybe like a loss function $f : \\mathbb{R}^n \\to \\mathbb{R}$), while in the outer Jacobian computation we're differentiating a function with a square Jacobian (since $\\nabla f : \\mathbb{R}^n \\to \\mathbb{R}^n$), which is where forward-mode wins out.",
"_____no_output_____"
],
[
"## How it's made: two foundational autodiff functions",
"_____no_output_____"
],
[
"### Jacobian-Vector products (JVPs, aka forward-mode autodiff)\n\nJAX includes efficient and general implementations of both forward- and reverse-mode automatic differentiation. The familiar `grad` function is built on reverse-mode, but to explain the difference in the two modes, and when each can be useful, we need a bit of math background.\n\n#### JVPs in math\n\nMathematically, given a function $f : \\mathbb{R}^n \\to \\mathbb{R}^m$, the Jacobian matrix of $f$ evaluated at an input point $x \\in \\mathbb{R}^n$, denoted $\\partial f(x)$, is often thought of as a matrix in $\\mathbb{R}^m \\times \\mathbb{R}^n$:\n\n$\\qquad \\partial f(x) \\in \\mathbb{R}^{m \\times n}$.\n\nBut we can also think of $\\partial f(x)$ as a linear map, which maps the tangent space of the domain of $f$ at the point $x$ (which is just another copy of $\\mathbb{R}^n$) to the tangent space of the codomain of $f$ at the point $f(x)$ (a copy of $\\mathbb{R}^m$):\n\n$\\qquad \\partial f(x) : \\mathbb{R}^n \\to \\mathbb{R}^m$.\n\nThis map is called the [pushforward map](https://en.wikipedia.org/wiki/Pushforward_(differential)) of $f$ at $x$. The Jacobian matrix is just the matrix for this linear map in a standard basis.\n\nIf we don't commit to one specific input point $x$, then we can think of the function $\\partial f$ as first taking an input point and returning the Jacobian linear map at that input point:\n\n$\\qquad \\partial f : \\mathbb{R}^n \\to \\mathbb{R}^n \\to \\mathbb{R}^m$.\n\nIn particular, we can uncurry things so that given input point $x \\in \\mathbb{R}^n$ and a tangent vector $v \\in \\mathbb{R}^n$, we get back an output tangent vector in $\\mathbb{R}^m$. We call that mapping, from $(x, v)$ pairs to output tangent vectors, the *Jacobian-vector product*, and write it as\n\n$\\qquad (x, v) \\mapsto \\partial f(x) v$\n\n#### JVPs in JAX code\n\nBack in Python code, JAX's `jvp` function models this transformation. Given a Python function that evaluates $f$, JAX's `jvp` is a way to get a Python function for evaluating $(x, v) \\mapsto (f(x), \\partial f(x) v)$.",
"_____no_output_____"
]
],
[
[
"from jax import jvp\n\n# Isolate the function from the weight matrix to the predictions\nf = lambda W: predict(W, b, inputs)\n\nkey, subkey = random.split(key)\nv = random.normal(subkey, W.shape)\n\n# Push forward the vector `v` along `f` evaluated at `W`\ny, u = jvp(f, (W,), (v,))",
"_____no_output_____"
]
],
[
[
"In terms of Haskell-like type signatures, we could write\n\n```haskell\njvp :: (a -> b) -> a -> T a -> (b, T b)\n```\n\nwhere we use `T a` to denote the type of the tangent space for `a`. In words, `jvp` takes as arguments a function of type `a -> b`, a value of type `a`, and a tangent vector value of type `T a`. It gives back a pair consisting of a value of type `b` and an output tangent vector of type `T b`.",
"_____no_output_____"
],
[
"The `jvp`-transformed function is evaluated much like the original function, but paired up with each primal value of type `a` it pushes along tangent values of type `T a`. For each primitive numerical operation that the original function would have applied, the `jvp`-transformed function executes a \"JVP rule\" for that primitive that both evaluates the primitive on the primals and applies the primitive's JVP at those primal values.\n\nThat evaluation strategy has some immediate implications about computational complexity: since we evaluate JVPs as we go, we don't need to store anything for later, and so the memory cost is independent of the depth of the computation. In addition, the FLOP cost of the `jvp`-transformed function is about 2x the cost of just evaluating the function. Put another way, for a fixed primal point $x$, we can evaluate $v \\mapsto \\partial f(x) \\cdot v$ for about the same cost as evaluating $f$.\n\nThat memory complexity sounds pretty compelling! So why don't we see forward-mode very often in machine learning?\n\nTo answer that, first think about how you could use a JVP to build a full Jacobian matrix. If we apply a JVP to a one-hot tangent vector, it reveals one column of the Jacobian matrix, corresponding to the nonzero entry we fed in. So we can build a full Jacobian one column at a time, and to get each column costs about the same as one function evaluation. That will be efficient for functions with \"tall\" Jacobians, but inefficient for \"wide\" Jacobians.\n\nIf you're doing gradient-based optimization in machine learning, you probably want to minimize a loss function from parameters in $\\mathbb{R}^n$ to a scalar loss value in $\\mathbb{R}$. That means the Jacobian of this function is a very wide matrix: $\\partial f(x) \\in \\mathbb{R}^{1 \\times n}$, which we often identify with the Gradient vector $\\nabla f(x) \\in \\mathbb{R}^n$. Building that matrix one column at a time, with each call taking a similar number of FLOPs to evaluating the original function, sure seems inefficient! In particular, for training neural networks, where $f$ is a training loss function and $n$ can be in the millions or billions, this approach just won't scale.\n\nTo do better for functions like this, we just need to use reverse-mode.",
"_____no_output_____"
],
[
"### Vector-Jacobian products (VJPs, aka reverse-mode autodiff)\n\nWhere forward-mode gives us back a function for evaluating Jacobian-vector products, which we can then use to build Jacobian matrices one column at a time, reverse-mode is a way to get back a function for evaluating vector-Jacobian products (equivalently Jacobian-transpose-vector products), which we can use to build Jacobian matrices one row at a time.\n\n#### VJPs in math\n\nLet's again consider a function $f : \\mathbb{R}^n \\to \\mathbb{R}^m$.\nStarting from our notation for JVPs, the notation for VJPs is pretty simple:\n\n$\\qquad (x, v) \\mapsto v \\partial f(x)$,\n\nwhere $v$ is an element of the cotangent space of $f$ at $x$ (isomorphic to another copy of $\\mathbb{R}^m$). When being rigorous, we should think of $v$ as a linear map $v : \\mathbb{R}^m \\to \\mathbb{R}$, and when we write $v \\partial f(x)$ we mean function composition $v \\circ \\partial f(x)$, where the types work out because $\\partial f(x) : \\mathbb{R}^n \\to \\mathbb{R}^m$. But in the common case we can identify $v$ with a vector in $\\mathbb{R}^m$ and use the two almost interchageably, just like we might sometimes flip between \"column vectors\" and \"row vectors\" without much comment.\n\nWith that identification, we can alternatively think of the linear part of a VJP as the transpose (or adjoint conjugate) of the linear part of a JVP:\n\n$\\qquad (x, v) \\mapsto \\partial f(x)^\\mathsf{T} v$.\n\nFor a given point $x$, we can write the signature as\n\n$\\qquad \\partial f(x)^\\mathsf{T} : \\mathbb{R}^m \\to \\mathbb{R}^n$.\n\nThe corresponding map on cotangent spaces is often called the [pullback](https://en.wikipedia.org/wiki/Pullback_(differential_geometry))\nof $f$ at $x$. The key for our purposes is that it goes from something that looks like the output of $f$ to something that looks like the input of $f$, just like we might expect from a transposed linear function.\n\n#### VJPs in JAX code\n\nSwitching from math back to Python, the JAX function `vjp` can take a Python function for evaluating $f$ and give us back a Python function for evaluating the VJP $(x, v) \\mapsto (f(x), v^\\mathsf{T} \\partial f(x))$.",
"_____no_output_____"
]
],
[
[
"from jax import vjp\n\n# Isolate the function from the weight matrix to the predictions\nf = lambda W: predict(W, b, inputs)\n\ny, vjp_fun = vjp(f, W)\n\nkey, subkey = random.split(key)\nu = random.normal(subkey, y.shape)\n\n# Pull back the covector `u` along `f` evaluated at `W`\nv = vjp_fun(u)",
"_____no_output_____"
]
],
[
[
"In terms of Haskell-like type signatures, we could write\n\n```haskell\nvjp :: (a -> b) -> a -> (b, CT b -> CT a)\n```\n\nwhere we use `CT a` to denote the type for the cotangent space for `a`. In words, `vjp` takes as arguments a function of type `a -> b` and a point of type `a`, and gives back a pair consisting of a value of type `b` and a linear map of type `CT b -> CT a`.\n\nThis is great because it lets us build Jacobian matrices one row at a time, and the FLOP cost for evaluating $(x, v) \\mapsto (f(x), v^\\mathsf{T} \\partial f(x))$ is only about twice the cost of evaluating $f$. In particular, if we want the gradient of a function $f : \\mathbb{R}^n \\to \\mathbb{R}$, we can do it in just one call. That's how `grad` is efficient for gradient-based optimization, even for objectives like neural network training loss functions on millions or billions of parameters.\n\nThere's a cost, though: though the FLOPs are friendly, memory scales with the depth of the computation. Also, the implementation is traditionally more complex than that of forward-mode, though JAX has some tricks up its sleeve (that's a story for a future notebook!).\n\nFor more on how reverse-mode works, see [this tutorial video from the Deep Learning Summer School in 2017](http://videolectures.net/deeplearning2017_johnson_automatic_differentiation/).",
"_____no_output_____"
],
[
"## Hessian-vector products using both forward- and reverse-mode",
"_____no_output_____"
],
[
"In a previous section, we implemented a Hessian-vector product function just using reverse-mode:",
"_____no_output_____"
]
],
[
[
"def hvp(f, x, v):\n return grad(lambda x: np.vdot(grad(f)(x), v))",
"_____no_output_____"
]
],
[
[
"That's efficient, but we can do even better and save some memory by using forward-mode together with reverse-mode.\n\nMathematically, given a function $f : \\mathbb{R}^n \\to \\mathbb{R}$ to differentiate, a point $x \\in \\mathbb{R}^n$ at which to linearize the function, and a vector $v \\in \\mathbb{R}^n$, the Hessian-vector product function we want is\n\n$(x, v) \\mapsto \\partial^2 f(x) v$\n\nConsider the helper function $g : \\mathbb{R}^n \\to \\mathbb{R}^n$ defined to be the derivative (or gradient) of $f$, namely $g(x) = \\partial f(x)$. All we need is its JVP, since that will give us\n\n$(x, v) \\mapsto \\partial g(x) v = \\partial^2 f(x) v$.\n\nWe can translate that almost directly into code:",
"_____no_output_____"
]
],
[
[
"from jax import jvp, grad\n\n# forward-over-reverse\ndef hvp(f, primals, tangents):\n return jvp(grad(f), primals, tangents)[1]",
"_____no_output_____"
]
],
[
[
"Even better, since we didn't have to call `np.dot` directly, this `hvp` function works with arrays of any shape and with arbitrary container types (like vectors stored as nested lists/dicts/tuples), and doesn't even have a dependence on `jax.numpy`.\n\nHere's an example of how to use it:",
"_____no_output_____"
]
],
[
[
"def f(X):\n return np.sum(np.tanh(X)**2)\n\nkey, subkey1, subkey2 = random.split(key, 3)\nX = random.normal(subkey1, (30, 40))\nV = random.normal(subkey2, (30, 40))\n\nans1 = hvp(f, (X,), (V,))\nans2 = np.tensordot(hessian(f)(X), V, 2)\n\nprint(np.allclose(ans1, ans2, 1e-4, 1e-4))",
"True\n"
]
],
[
[
"Another way you might consider writing this is using reverse-over-forward:",
"_____no_output_____"
]
],
[
[
"# reverse-over-forward\ndef hvp_revfwd(f, primals, tangents):\n g = lambda primals: jvp(f, primals, tangents)[1]\n return grad(g)(primals)",
"_____no_output_____"
]
],
[
[
"That's not quite as good, though, because forward-mode has less overhead than reverse-mode, and since the outer differentiation operator here has to differentiate a larger computation than the inner one, keeping forward-mode on the outside works best:",
"_____no_output_____"
]
],
[
[
"# reverse-over-reverse, only works for single arguments\ndef hvp_revrev(f, primals, tangents):\n x, = primals\n v, = tangents\n return grad(lambda x: np.vdot(grad(f)(x), v))(x)\n\n\nprint(\"Forward over reverse\")\n%timeit -n10 -r3 hvp(f, (X,), (V,))\nprint(\"Reverse over forward\")\n%timeit -n10 -r3 hvp_revfwd(f, (X,), (V,))\nprint(\"Reverse over reverse\")\n%timeit -n10 -r3 hvp_revrev(f, (X,), (V,))\n\nprint(\"Naive full Hessian materialization\")\n%timeit -n10 -r3 np.tensordot(hessian(f)(X), V, 2)",
"Forward over reverse\n10 loops, best of 3: 14.3 ms per loop\nReverse over forward\n10 loops, best of 3: 17.1 ms per loop\nReverse over reverse\n10 loops, best of 3: 19.6 ms per loop\nNaive full Hessian materialization\n10 loops, best of 3: 99.2 ms per loop\n"
]
],
[
[
"## Composing VJPs, JVPs, and `vmap`",
"_____no_output_____"
],
[
"### Jacobian-Matrix and Matrix-Jacobian products\n\nNow that we have `jvp` and `vjp` transformations that give us functions to push-forward or pull-back single vectors at a time, we can use JAX's [`vmap` transformation](https://github.com/google/jax#auto-vectorization-with-vmap) to push and pull entire bases at once. In particular, we can use that to write fast matrix-Jacobian and Jacobian-matrix products.",
"_____no_output_____"
]
],
[
[
"# Isolate the function from the weight matrix to the predictions\nf = lambda W: predict(W, b, inputs)\n\n# Pull back the covectors `m_i` along `f`, evaluated at `W`, for all `i`.\n# First, use a list comprehension to loop over rows in the matrix M.\ndef loop_mjp(f, x, M):\n y, vjp_fun = vjp(f, x)\n return np.vstack([vjp_fun(mi) for mi in M])\n\n# Now, use vmap to build a computation that does a single fast matrix-matrix\n# multiply, rather than an outer loop over vector-matrix multiplies.\ndef vmap_mjp(f, x, M):\n y, vjp_fun = vjp(f, x)\n return vmap(vjp_fun)(M)\n\nkey = random.PRNGKey(0)\nnum_covecs = 128\nU = random.normal(key, (num_covecs,) + y.shape)\n\nloop_vs = loop_mjp(f, W, M=U)\nprint('Non-vmapped Matrix-Jacobian product')\n%timeit -n10 -r3 loop_mjp(f, W, M=U)\n\nprint('\\nVmapped Matrix-Jacobian product')\nvmap_vs = vmap_mjp(f, W, M=U)\n%timeit -n10 -r3 vmap_mjp(f, W, M=U)\n\nassert np.allclose(loop_vs, vmap_vs), 'Vmap and non-vmapped Matrix-Jacobian Products should be identical'",
"Non-vmapped Matrix-Jacobian product\n10 loops, best of 3: 156 ms per loop\n\nVmapped Matrix-Jacobian product\n10 loops, best of 3: 6.7 ms per loop\n"
],
[
"def loop_jmp(f, x, M):\n # jvp immediately returns the primal and tangent values as a tuple,\n # so we'll compute and select the tangents in a list comprehension\n return np.vstack([jvp(f, (W,), (si,))[1] for si in S])\n\ndef vmap_jmp(f, x, M):\n _jvp = lambda s: jvp(f, (W,), (s,))[1]\n return vmap(_jvp)(M)\n\nnum_vecs = 128\nS = random.normal(key, (num_vecs,) + W.shape)\n\nloop_vs = loop_jmp(f, W, M=S)\nprint('Non-vmapped Jacobian-Matrix product')\n%timeit -n10 -r3 loop_jmp(f, W, M=S)\nvmap_vs = vmap_jmp(f, W, M=S)\nprint('\\nVmapped Jacobian-Matrix product')\n%timeit -n10 -r3 vmap_jmp(f, W, M=S)\n\nassert np.allclose(loop_vs, vmap_vs), 'Vmap and non-vmapped Jacobian-Matrix products should be identical'",
"Non-vmapped Jacobian-Matrix product\n10 loops, best of 3: 529 ms per loop\n\nVmapped Jacobian-Matrix product\n10 loops, best of 3: 5.74 ms per loop\n"
]
],
[
[
"### The implementation of `jacfwd` and `jacrev`\n\n",
"_____no_output_____"
],
[
"Now that we've seen fast Jacobian-matrix and matrix-Jacobian products, it's not hard to guess how to write `jacfwd` and `jacrev`. We just use the same technique to push-forward or pull-back an entire standard basis (isomorphic to an identity matrix) at once.",
"_____no_output_____"
]
],
[
[
"from jax import jacrev as builtin_jacrev\n\ndef our_jacrev(f):\n def jacfun(x):\n y, vjp_fun = vjp(f, x)\n # Use vmap to do a matrix-Jacobian product.\n # Here, the matrix is the Euclidean basis, so we get all\n # entries in the Jacobian at once. \n J, = vmap(vjp_fun, in_axes=0)(np.eye(len(y)))\n return J\n return jacfun\n\nassert np.allclose(builtin_jacrev(f)(W), our_jacrev(f)(W)), 'Incorrect reverse-mode Jacobian results!'",
"_____no_output_____"
],
[
"from jax import jacfwd as builtin_jacfwd\n\ndef our_jacfwd(f):\n def jacfun(x):\n _jvp = lambda s: jvp(f, (x,), (s,))[1]\n Jt =vmap(_jvp, in_axes=1)(np.eye(len(x)))\n return np.transpose(Jt)\n return jacfun\n\nassert np.allclose(builtin_jacfwd(f)(W), our_jacfwd(f)(W)), 'Incorrect forward-mode Jacobian results!'",
"_____no_output_____"
]
],
[
[
"Interestingly, [Autograd](https://github.com/hips/autograd) couldn't do this. Our [implementation of reverse-mode `jacobian` in Autograd](https://github.com/HIPS/autograd/blob/96a03f44da43cd7044c61ac945c483955deba957/autograd/differential_operators.py#L60) had to pull back one vector at a time with an outer-loop `map`. Pushing one vector at a time through the computation is much less efficient than batching it all together with `vmap`.",
"_____no_output_____"
],
[
"Another thing that Autograd couldn't do is `jit`. Interestingly, no matter how much Python dynamism you use in your function to be differentiated, we could always use `jit` on the linear part of the computation. For example:",
"_____no_output_____"
]
],
[
[
"def f(x):\n try:\n if x < 3:\n return 2 * x ** 3\n else:\n raise ValueError\n except ValueError:\n return np.pi * x\n\ny, f_vjp = vjp(f, 4.)\nprint(jit(f_vjp)(1.))",
"(array(3.1415927, dtype=float32),)\n"
]
],
[
[
"## Complex numbers and differentiation",
"_____no_output_____"
],
[
"JAX is great at complex numbers and differentiation. To support both [holomorphic and non-holomorphic differentiation](https://en.wikipedia.org/wiki/Holomorphic_function), JAX follows [Autograd's convention](https://github.com/HIPS/autograd/blob/master/docs/tutorial.md#complex-numbers) for encoding complex derivatives.\n\nConsider a complex-to-complex function $f: \\mathbb{C} \\to \\mathbb{C}$ that we break down into its component real-to-real functions:",
"_____no_output_____"
]
],
[
[
"def f(z):\n x, y = real(z), imag(z)\n return u(x, y), v(x, y) * 1j",
"_____no_output_____"
]
],
[
[
"That is, we've decomposed $f(z) = u(x, y) + v(x, y) i$ where $z = x + y i$. We define `grad(f)` to correspond to",
"_____no_output_____"
]
],
[
[
"def grad_f(z):\n x, y = real(z), imag(z)\n return grad(u, 0)(x, y) + grad(u, 1)(x, y) * 1j",
"_____no_output_____"
]
],
[
[
"In math symbols, that means we define $\\partial f(z) \\triangleq \\partial_0 u(x, y) + \\partial_1 u(x, y)$. So we throw out $v$, ignoring the complex component function of $f$ entirely!",
"_____no_output_____"
],
[
"This convention covers three important cases:\n1. If `f` evaluates a holomorphic function, then we get the usual complex derivative, since $\\partial_0 u = \\partial_1 v$ and $\\partial_1 u = - \\partial_0 v$.\n2. If `f` is evaluates the real-valued loss function of a complex parameter `x`, then we get a result that we can use in gradient-based optimization by taking steps in the direction of the conjugate of `grad(f)(x)`.\n3. If `f` evaluates a real-to-real function, but its implementation uses complex primitives internally (some of which must be non-holomorphic, e.g. FFTs used in convolutions) then we get the same result that an implementation that only used real primitives would have given.\n\nBy throwing away `v` entirely, this convention does not handle the case where `f` evaluates a non-holomorphic function and you want to evaluate all of $\\partial_0 u$, $\\partial_1 u$, $\\partial_0 v$, and $\\partial_1 v$ at once. But in that case the answer would have to contain four real values, and so there's no way to express it as a single complex number.",
"_____no_output_____"
],
[
"You should expect complex numbers to work everywhere in JAX. Here's differentiating through a Cholesky decomposition of a complex matrix:",
"_____no_output_____"
]
],
[
[
"A = np.array([[5., 2.+3j, 5j],\n [2.-3j, 7., 1.+7j],\n [-5j, 1.-7j, 12.]])\n\ndef f(X):\n L = np.linalg.cholesky(X)\n return np.sum((L - np.sin(L))**2)\n\ngrad(f)(A)",
"/usr/local/lib/python2.7/dist-packages/jax/numpy/linalg.py:51: UserWarning: numpy.linalg support is experimental and may cause silent failures or wrong outputs\n warnings.warn(_EXPERIMENTAL_WARNING)\n"
]
],
[
[
"For primitives' JVP rules, writing the primals as $z = a + bi$ and the tangents as $t = c + di$, we define the Jacobian-vector product $t \\mapsto \\partial f(z) \\cdot t$ as\n\n$t \\mapsto\n\\begin{matrix} \\begin{bmatrix} 1 & 1 \\end{bmatrix} \\\\ ~ \\end{matrix}\n\\begin{bmatrix} \\partial_0 u(a, b) & -\\partial_0 v(a, b) \\\\ - \\partial_1 u(a, b) i & \\partial_1 v(a, b) i \\end{bmatrix}\n\\begin{bmatrix} c \\\\ d \\end{bmatrix}$.",
"_____no_output_____"
],
[
"See Chapter 4 of [Dougal's PhD thesis](https://dougalmaclaurin.com/phd-thesis.pdf) for more details.",
"_____no_output_____"
],
[
"# More advanced autodiff\n\nIn this notebook, we worked through some easy, and then progressively more complicated, applications of automatic differentiation in JAX. We hope you now feel that taking derivatives in JAX is easy and powerful. \n\nThere's a whole world of other autodiff tricks and functionality out there. Topics we didn't cover, but hope to in a \"Advanced Autodiff Cookbook\" include:\n\n - Gauss-Newton Vector Products, linearizing once\n - Custom VJPs and JVPs\n - Efficient derivatives at fixed-points\n - Estimating the trace of a Hessian using random Hessian-vector products.\n - Forward-mode autodiff using only reverse-mode autodiff.\n - Taking derivatives with respect to custom data types.\n - Checkpointing (binomial checkpointing for efficient reverse-mode, not model snapshotting).\n - Optimizing VJPs with Jacobian pre-accumulation.",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
4a19aa18fb556ca2c5f44f96f6a171f5389d4e6e
| 11,945 |
ipynb
|
Jupyter Notebook
|
content/lessons/09/Class-Coding-Lab/CCL-Lists.ipynb
|
eldontsoi/Summe-Project-2019
|
4960aa2fd290438ede047bfa89b26e7617521366
|
[
"MIT"
] | null | null | null |
content/lessons/09/Class-Coding-Lab/CCL-Lists.ipynb
|
eldontsoi/Summe-Project-2019
|
4960aa2fd290438ede047bfa89b26e7617521366
|
[
"MIT"
] | null | null | null |
content/lessons/09/Class-Coding-Lab/CCL-Lists.ipynb
|
eldontsoi/Summe-Project-2019
|
4960aa2fd290438ede047bfa89b26e7617521366
|
[
"MIT"
] | null | null | null | 26.195175 | 201 | 0.52432 |
[
[
[
"# In-Class Coding Lab: Lists\n\nThe goals of this lab are to help you understand:\n\n - List indexing and slicing\n - List methods such as insert, append, find, delete\n - How to iterate over lists with loops\n \n## Python Lists work like Real-Life Lists\n \nIn real life, we make lists all the time. To-Do lists. Shopping lists. Reading lists. These lists are collections of items, for example here's my shopping list:\n \n ```\n Milk, Eggs, Bread, Beer\n ```\n\nThere are 4 items in this list.\n\nLikewise, we can make a similar list in Python, and count the number of items in the list using the `len()` function:",
"_____no_output_____"
]
],
[
[
"shopping_list = [ 'Milk', 'Eggs', 'Bread', 'Beer']\nitem_count = len(shopping_list)\nprint(\"List: %s has %d items\" % (shopping_list, item_count))",
"List: ['Milk', 'Eggs', 'Bread', 'Beer'] has 4 items\n"
]
],
[
[
"## Enumerating Your List Items\n\nIn real-life, we *enumerate* lists all the time. We go through the items on our list one at a time and make a decision, for example: \"Did I add that to my shopping cart yet?\"\n\nIn Python we go through items in our lists with the `for` loop. We use `for` because the number of items in pre-determined and thus a **definite** loop is the appropriate choice. \n\nHere's an example:",
"_____no_output_____"
]
],
[
[
"for item in shopping_list:\n print(\"I need to buy some %s \" % (item))",
"I need to buy some Milk \nI need to buy some Eggs \nI need to buy some Bread \nI need to buy some Beer \n"
]
],
[
[
"## Now You Try It!\n\nWrite code in the space below to print each stock on its own line.",
"_____no_output_____"
]
],
[
[
"stocks = [ 'IBM', 'AAPL', 'GOOG', 'MSFT', 'TWTR', 'FB']\n#TODO: Write code here\nprint(\"Here are the stocks I invested $ 1 000 000 000 \")\nfor item in stocks:\n \n print (item)",
"Here are the stocks I invested $ 1 000 000 000 \nIBM\nAAPL\nGOOG\nMSFT\nTWTR\nFB\n"
]
],
[
[
"## Indexing Lists\n\nSometimes we refer to our items by their place in the list. For example \"Milk is the first item on the list\" or \"Beer is the last item on the list.\"\n\nWe can also do this in Python, and it is called *indexing* the list. \n\n**IMPORTANT** The first item in a Python lists starts at index **0**.",
"_____no_output_____"
]
],
[
[
"print(\"The first item in the list is:\", shopping_list[0]) \nprint(\"The last item in the list is:\", shopping_list[3]) \nprint(\"This is also the last item in the list:\", shopping_list[-1]) \nprint(\"This is the second to last item in the list:\", shopping_list[-2])\n",
"The first item in the list is: Milk\nThe last item in the list is: Beer\nThis is also the last item in the list: Beer\nThis is the second to last item in the list: Bread\n"
]
],
[
[
"## For Loop with Index\n\nYou can also loop through your Python list using an index. In this case we use the `range()` function to determine how many times we should loop:",
"_____no_output_____"
]
],
[
[
"for i in range(len(shopping_list)):\n print(\"I need to buy some %s \" % (shopping_list[i]))",
"I need to buy some Milk \nI need to buy some Eggs \nI need to buy some Bread \nI need to buy some Beer \n"
]
],
[
[
"## Now You Try It!\n\nWrite code to print the 2nd and 4th stocks in the list variable `stocks`. For example:\n\n`AAPL MSFT`",
"_____no_output_____"
]
],
[
[
"#TODO: Write code here\nstocks = [ 'IBM', 'AAPL', 'GOOG', 'MSFT', 'TWTR', 'FB']\n\nprint(stocks [1],stocks[3])\n",
"AAPL MSFT\n"
]
],
[
[
"## Lists are Mutable\n\nUnlike strings, lists are mutable. This means we can change a value in the list.\n\nFor example, I want `'Craft Beer'` not just `'Beer'`:",
"_____no_output_____"
]
],
[
[
"print(shopping_list)\nshopping_list[-1] = 'Craft Beer'\nprint(shopping_list)",
"['Milk', 'Eggs', 'Bread', 'Beer']\n['Milk', 'Eggs', 'Bread', 'Craft Beer']\n"
]
],
[
[
"## List Methods\n\nIn your readings and class lecture, you encountered some list methods. These allow us to maniupulate the list by adding or removing items.",
"_____no_output_____"
]
],
[
[
"print(\"Shopping List: %s\" %(shopping_list))\n\nprint(\"Adding 'Cheese' to the end of the list...\")\nshopping_list.append('Cheese') #add to end of list\nprint(\"Shopping List: %s\" %(shopping_list))\n\nprint(\"Adding 'Cereal' to position 0 in the list...\")\nshopping_list.insert(0,'Cereal') # add to the beginning of the list (position 0)\nprint(\"Shopping List: %s\" %(shopping_list))\n\nprint(\"Removing 'Cheese' from the list...\")\nshopping_list.remove('Cheese') # remove 'Cheese' from the list\nprint(\"Shopping List: %s\" %(shopping_list))\n\nprint(\"Removing item from position 0 in the list...\")\ndel shopping_list[0] # remove item at position 0\nprint(\"Shopping List: %s\" %(shopping_list))\n",
"Shopping List: ['Milk', 'Eggs', 'Bread', 'Craft Beer']\nAdding 'Cheese' to the end of the list...\nShopping List: ['Milk', 'Eggs', 'Bread', 'Craft Beer', 'Cheese']\nAdding 'Cereal' to position 0 in the list...\nShopping List: ['Cereal', 'Milk', 'Eggs', 'Bread', 'Craft Beer', 'Cheese']\nRemoving 'Cheese' from the list...\nShopping List: ['Cereal', 'Milk', 'Eggs', 'Bread', 'Craft Beer']\nRemoving item from position 0 in the list...\nShopping List: ['Milk', 'Eggs', 'Bread', 'Craft Beer']\n"
]
],
[
[
"## Now You Try It!\n\nWrite a program to remove the following stocks: `IBM` and `TWTR`\n\nThen add this stock to the end `NFLX` and this stock to the beginning `TSLA`\n\nPrint your list when you are done. It should look like this:\n\n`['TSLA', 'AAPL', 'GOOG', 'MSFT', 'FB', 'NFLX']`\n",
"_____no_output_____"
]
],
[
[
"# TODO: Write Code here\nstocks = [ 'IBM', 'AAPL', 'GOOG', 'MSFT', 'TWTR', 'FB']\n\n#print(stocks)\nstocks.append(\"NFLX\")\nstocks.remove('IBM')\nstocks.remove('TWTR')\nstocks.insert(0,'TSLA')\n\nprint(stocks)",
"['TSLA', 'AAPL', 'GOOG', 'MSFT', 'FB', 'NFLX']\n"
]
],
[
[
"## Sorting\n\nSince Lists are mutable. You can use the `sort()` method to re-arrange the items in the list alphabetically (or numerically if it's a list of numbers)",
"_____no_output_____"
]
],
[
[
"print(\"Before Sort:\", shopping_list)\nshopping_list.sort() \nprint(\"After Sort:\", shopping_list)",
"Before Sort: ['Milk', 'Eggs', 'Bread', 'Craft Beer']\nAfter Sort: ['Bread', 'Craft Beer', 'Eggs', 'Milk']\n"
]
],
[
[
"# Putting it all together\n\nWinning Lotto numbers. When the lotto numbers are drawn, they are in any order, when they are presented they're allways sorted. Let's write a program to input 5 numbers then output them sorted\n\n```\n1. for i in range(5)\n2. input a number\n3. append the number you input to the lotto_numbers list\n4. sort the lotto_numbers list\n5. print the lotto_numbers list like this: \n 'today's winning numbers are [1, 5, 17, 34, 56]'\n```",
"_____no_output_____"
]
],
[
[
"## TODO: Write program here:\n \nlotto_numbers = [] # start with an empty list\nfor i in range(5):\n inp = input(\"input a number: \")\n lotto_numbers.append(inp)\nlotto_numbers.sort()\n#print(lotto_numbers)\nprint(\"today's winning numbers are\", lotto_numbers)",
"input a number: 87\ninput a number: 6\ninput a number: 5\ninput a number: 4\ninput a number: 2\ntoday's winning numbers are ['2', '4', '5', '6', '87']\n"
],
[
"import random",
"_____no_output_____"
]
],
[
[
"##### 5\n##### 5\n##### ",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
4a19c4a7073bf0b72062cf00b4a9d5fbe9f94bb3
| 12,564 |
ipynb
|
Jupyter Notebook
|
Python Assignment 6.ipynb
|
sachinshubhams/Python-Tutorial
|
c1e1bc672f995112920cd71a07b5e671de0c7f22
|
[
"MIT"
] | null | null | null |
Python Assignment 6.ipynb
|
sachinshubhams/Python-Tutorial
|
c1e1bc672f995112920cd71a07b5e671de0c7f22
|
[
"MIT"
] | null | null | null |
Python Assignment 6.ipynb
|
sachinshubhams/Python-Tutorial
|
c1e1bc672f995112920cd71a07b5e671de0c7f22
|
[
"MIT"
] | 3 |
2021-06-29T00:27:47.000Z
|
2021-07-04T02:43:58.000Z
| 22.556553 | 280 | 0.468322 |
[
[
[
"### 1. Conditionals. Study the following code:\n<code>\n\nprint (\"statement A\")\nif x > 0:\n print (\"statement B\")\nelif x < 0:\n print( \"statement C\")\nelse:\n print (\"statement D\")\nprint ( \"statement E\")\n \n\n</code> ",
"_____no_output_____"
]
],
[
[
"ans=input(\"Which of the statements above (A, B, C, D, E) will be printed if x < 0?\\n\") \nprint (ans)\n",
"Which of the statements above (A, B, C, D, E) will be printed if x < 0?\nstatement A statement C statement E\nstatement A statement C statement E\n"
],
[
"ans=input(\"Which of the statements above will be printed if x == 0?\\n\") \nprint (ans)",
"Which of the statements above will be printed if x == 0?\nstatement A statement D statement E\nstatement A statement D statement E\n"
],
[
"ans=input(\"Which of the statements above will be printed if x > 0?\\n\") \nprint (ans)",
"Which of the statements above will be printed if x > 0?\nstatement A statement B statement E\nstatement A statement B statement E\n"
]
],
[
[
"### 2. What are the value(s) returned after executing the following range code(s)\n ",
"_____no_output_____"
]
],
[
[
"my_answer = eval (input (f'What is the value of list(range(5))?\\n'))\n\nprint(my_answer)\n\nif my_answer== list(range(5)):\n print(\"You are Correct\")\nelse:\n print(\"Wrong Answer, Try Again\")\n",
"What is the value of list(range(5))?\n[0, 1, 2, 3, 4]\n[0, 1, 2, 3, 4]\nYou are Correct\n"
],
[
"my_answer = eval (input (f'What is the value of list(range(1,10))?\\n'))\n\nprint(my_answer)\n\nif my_answer== list(range(1,10)):\n print(\"You are Correct\")\nelse:\n print(\"Wrong Answer, Try Again\")",
"What is the value of list(range(1,10))?\n[1, 2, 3, 4,5,6,7,8,9]\n[1, 2, 3, 4, 5, 6, 7, 8, 9]\nYou are Correct\n"
],
[
"my_answer = eval (input (f'What is the value of list(range(1,30,5))?\\n'))\n\nprint(my_answer)\n\nif my_answer== list(range(1,30,5)):\n print(\"You are Correct\")\nelse:\n print(\"Wrong Answer, Try Again\")",
"What is the value of list(range(1,30,5))?\n[1,6,11,16,21,26]\n[1, 6, 11, 16, 21, 26]\nYou are Correct\n"
],
[
"my_answer = eval (input (f'What is the value of list(range(1,10,-3))?\\n'))\n\nprint(my_answer)\n\nif my_answer== list(range(1,10, -3)):\n print(\"You are Correct\")\nelse:\n print(\"Wrong Answer, Try Again\")",
"What is the value of list(range(1,10,-3))?\n[]\n[]\nYou are Correct\n"
]
],
[
[
"### 3. What argument(s) could we give to the range() built-in function if we wanted the following lists to be generated? ",
"_____no_output_____"
],
[
"0, 1, 2, 3, 4, 5, 6, 7, 8, 9",
"_____no_output_____"
]
],
[
[
"range(0,10)",
"_____no_output_____"
]
],
[
[
"3, 6, 9, 12, 15, 18",
"_____no_output_____"
]
],
[
[
"range(3,20,3)",
"_____no_output_____"
]
],
[
[
"-20, 200, 420, 640, 860",
"_____no_output_____"
]
],
[
[
"range(-20,861,220)",
"_____no_output_____"
]
],
[
[
"### 4. Consider the following variables have been defined with these values:\n<code>\na = 3\nb = 0\nc = -4\nd = 10\n</code> \nAnswer what would be the result of the following Boolean expressions:",
"_____no_output_____"
]
],
[
[
"a = 3; b = 0; c = -4; d = 10\nmy_answer = eval (input (f'What is the value of (a > c) and (d != b)?\\n'))\n\nprint(my_answer)\n\nif my_answer== ( (a > c) and (d != b) ):\n print(\"You are Correct\")\nelse:\n print(\"Wrong Answer, Try Again\")",
"What is the value of (a > c) and (d != b)?\nTrue\nTrue\nYou are Correct\n"
],
[
"a = 3; b = 0; c = -4; d = 10\nmy_answer = eval (input (f'What is the value of (c <= b) or (b<= d)?\\n'))\n\nprint(my_answer)\n\nif my_answer== ( (c <= b) or (b<= d)):\n print(\"You are Correct\")\nelse:\n print(\"Wrong Answer, Try Again\")",
"What is the value of (c <= b) or (b<= d)?\nTrue\nTrue\nYou are Correct\n"
],
[
"a = 3; b = 0; c = -4; d = 10\nmy_answer = eval (input (f'What is the value of not((c == d) and (c < a))?\\n'))\n\nprint(my_answer)\n\nif my_answer == (not((c == d) and (c < a))):\n print(\"You are Correct\")\nelse:\n print(\"Wrong Answer, Try Again\")",
"What is the value of not((c == d) and (c < a))?\nTrue\nTrue\nYou are Correct\n"
]
],
[
[
"### 5. \nWrite a program to play the guess the number game. The computer will pick a\nnumber between 1 and 100 and the user will try to guess the number in as few guesses\nas possible. Input: The user will enter a guess until the correct guess is entered The\nprogram will keep asking for a guess until the correct guess is entered. The program will\nalso give hints to the user (number is too high, or too low) Output: The program will\noutput the number of guesses. Below is a sample run of how the program should\nproceed:",
"_____no_output_____"
],
[
"guess_game()<br><br>\nI am thinking of a number between 1 and 100\n<br>Can you guess what the number is?\n<br>Enter your guess<br>\n10\n<br>Too low!\n<br>Enter your guess<br>\n60\n<br>Too high!\n<br>Enter your guess<br>\n45\n<br>You win!!\n<br>You solved the problem in 3 guesses",
"_____no_output_____"
]
],
[
[
"\nimport random\ndef guess_game():\n cnt = 0\n randomNum = random.randint(1, 100)\n print('I am thinking of a number between 1 and 100'+' \\n'+'Can you guess what the number is?')\n while cnt < 1000:\n print('Enter your guess') \n guessNum = int(input())\n cnt +=1\n\n if (guessNum < randomNum):\n print('Too low!')\n\n if (guessNum > randomNum):\n print('Too high!')\n\n if (guessNum == randomNum):\n break\n\n if guessNum == randomNum:\n cnt = str(cnt)\n print('You win!!'+\"\\n\"+'You solved the problem in ' + cnt + ' guesses!')\n \n \nguess_game()",
"I am thinking of a number between 1 and 100 \nCan you guess what the number is?\nEnter your guess\n50\nToo low!\nEnter your guess\n75\nToo low!\nEnter your guess\n89\nToo low!\nEnter your guess\n96\nToo low!\nEnter your guess\n98\nYou win!!\nYou solved the problem in 5 guesses!\n"
]
],
[
[
"#### 6\nLoops. Write a program to have the user input three (3) numbers: (f)rom, <br>(t)o, and (i)ncrement. Count from f to t in increments of i, inclusive of <br> f and t. For example, if the input is f == 2, t == 24, and i == 4,<br> the program would output: 2, 6, 10, 14, 18, 22.",
"_____no_output_____"
]
],
[
[
"def my_loop(f,t,i):\n temp=0\n for inc in range(f,t):\n if (f<t):\n f=f+i\n print(f-i) \nmy_loop(2,24,4)\n",
"2\n6\n10\n14\n18\n22\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a19cd44af68a8499229a82fa449081e5a94230f
| 12,383 |
ipynb
|
Jupyter Notebook
|
Blackjack/Blackjack.ipynb
|
preetparmar/Python-Games
|
39fdbe4772f86e737d7a12b5e1e55ec61184235e
|
[
"MIT"
] | null | null | null |
Blackjack/Blackjack.ipynb
|
preetparmar/Python-Games
|
39fdbe4772f86e737d7a12b5e1e55ec61184235e
|
[
"MIT"
] | null | null | null |
Blackjack/Blackjack.ipynb
|
preetparmar/Python-Games
|
39fdbe4772f86e737d7a12b5e1e55ec61184235e
|
[
"MIT"
] | null | null | null | 31.113065 | 129 | 0.480174 |
[
[
[
"Importing Libraries",
"_____no_output_____"
]
],
[
[
"import random",
"_____no_output_____"
]
],
[
[
"Defining Variables",
"_____no_output_____"
]
],
[
[
"playing = True\ngame_session = True\nsuits = ['Hearts', 'Diamonds', 'Spades', 'Clubs']\nranks = ['Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace']\nvalues = {\n 'Two':2, 'Three':3, 'Four':4, 'Five':5, 'Six':6, 'Seven':7, 'Eight':8, \n 'Nine':9, 'Ten':10, 'Jack':10, 'Queen':10, 'King':10, 'Ace':11\n }\n",
"_____no_output_____"
]
],
[
[
"Defining Class",
"_____no_output_____"
]
],
[
[
"# Defining Card Class\nclass Card:\n # Initializing Class\n def __init__(self, rank, suit):\n self.rank = rank\n self.suit = suit\n \n # Defining print method\n def __str__(self):\n return f'{self.rank} of {self.suit}'\n\n# Defining Deck Class\nclass Deck:\n # Initializing Class\n def __init__(self):\n self.deck = []\n self.value = 0\n for suit in suits:\n for rank in ranks:\n self.deck.append(Card(rank, suit))\n \n # Defining print method\n def __str__(self):\n complete_deck = ''\n for card in self.deck:\n complete_deck += f'\\n {card.__str__()}'\n return f'The deck has {complete_deck}'\n\n # Adding a method for shuffling the deck\n def shuffle(self):\n return random.shuffle(self.deck)\n\n # Adding a method for dealing the card\n def deal(self):\n return self.deck.pop()\n\n\n# Defining Hand Class\nclass Hand:\n # Initializing the class\n def __init__(self):\n self.cards = []\n self.value = 0\n self.aces = 0\n\n # Function to add a card to the player cards\n def add_card(self, card):\n self.cards.append(card)\n self.value += values[card.rank]\n if card.rank == 'Ace':\n self.aces += 1\n\n # Handling Ace's value (if total value exceeds 21 then the value of ace changes to 1)\n def adjust_ace_value(self):\n while self.value > 21 and self.aces:\n self.value -= 10\n self.aces -= 1\n\n\n# Defining Chips Class\nclass Chips:\n # Initialzing Class\n def __init__(self, total=100):\n self.total = total\n self.bet = 0\n\n # Function if player wins the bet\n def bet_won(self):\n self.total += self.bet\n \n # Function if player loses the bet\n def bet_lost(self):\n self.total -= self.bet\n\n",
"_____no_output_____"
]
],
[
[
"Defining Functions",
"_____no_output_____"
]
],
[
[
"# Fucntion for taking bet\ndef bet_this(chips):\n while True:\n try: \n chips.bet = int(input(f\"\\nHow much would you like to bet?\\n(You have total of {chips.total} chips)\\n\"))\n except ValueError:\n print('Sorry, the bet amount should be an integer value\\n')\n else:\n if chips.bet > chips.total:\n print(f\"Sorry you don't have that many chips. You only have {chips.total} chips.\\n\")\n else:\n print(f\"You bet {chips.bet} chips on this round!\\n\")\n break\n\n# Function to show some cards\ndef show_some_cards(dealer, player):\n print(f\"Dealer's Card: <Card Hidden> {dealer.cards[1]}\\n\")\n print(\"Your Cards:\")\n print(*player.cards, sep='\\n')\n print(f\"Your current value: {player.value}\\n\")\n\n# Function to show all the cards\ndef show_all_cards(dealer, player):\n print(\"Dealer's Cards:\")\n print(*dealer.cards, sep='\\n')\n print(f\"Dealer's final value: {dealer.value}\\n\")\n\n print(\"Your Cards:\")\n print(*player.cards, sep='\\n')\n print(f\"Your final value: {player.value}\")\n\n# Function to ask whether the player wants to hit or stand\ndef hit_or_stand(dealer_hand, player_hand, deck, chips):\n global playing\n\n while True:\n choice = input(\"Would you like to Hit or Stand?\\nEnter 'h' or 's'\\n\")\n if choice[0].lower() == 'h':\n print('Player wants to hit\\n')\n hit_card(player_hand, deck)\n player_hand.adjust_ace_value()\n\n\n if player_hand.value > 21:\n player_lost(chips)\n show_some_cards(dealer_hand, player_hand)\n playing = False\n break\n elif player_hand.value == 21:\n player_won(chips)\n show_some_cards(dealer_hand, player_hand)\n playing = False\n break\n else:\n show_some_cards(dealer_hand, player_hand)\n\n elif choice[0].lower() == 's':\n print('Player wants to stay\\n')\n break\n\n else:\n print('Please select from the given options only!\\n')\n continue\n\n# Function for dealing the card\ndef hit_card(hand, deck):\n hand.add_card(deck.deal())\n hand.adjust_ace_value()\n\n# Functions for all the game ending scenarios\ndef player_won(chips):\n chips.bet_won()\n print('\\nCongratulation! You WON!!!')\n print(f'You won {chips.bet} chips\\nYou have {chips.total} chips in total.\\n')\n\ndef player_lost(chips):\n chips.bet_lost()\n print('\\nOoops!!')\n print(f'Sorry you lost {chips.bet} chips\\nYou have {chips.total} chips remaining.\\n')\n\ndef dealer_won(chips):\n chips.bet_lost()\n print('\\nOpps! Dealor Won!')\n print(f'Sorry you lost {chips.bet} chips\\nYou have {chips.total} chips remaining.\\n')\n\ndef dealer_lost(chips):\n chips.bet_won()\n print('\\nDealer lost! That means YOU WON!!!')\n print(f'You won {chips.bet} chips\\nYou have {chips.total} chips in total.\\n')\n\ndef tie():\n print('\\nDealer and Player Tie!')\n\n",
"_____no_output_____"
]
],
[
[
"Game Begins",
"_____no_output_____"
]
],
[
[
"# Welcoming the player\nprint('Welcome to the game of Blackjack\\n')\n\n# Initializing the player chips\nplayer_chips = Chips(100)\n\nwhile game_session:\n playing = True\n # Getting a fresh deck and shuffling it\n deck = Deck()\n deck.shuffle()\n\n # Dealing Player Hand\n player_hand = Hand()\n player_hand.add_card(deck.deal())\n player_hand.add_card(deck.deal())\n\n # Dealing Dealer Hand\n dealer_hand = Hand()\n dealer_hand.add_card(deck.deal())\n dealer_hand.add_card(deck.deal())\n\n # Asking for the bet amount and showing some cards\n bet_this(player_chips)\n show_some_cards(dealer_hand, player_hand)\n\n\n if player_hand.value == 21:\n player_won(player_chips)\n print(f\"YOU won {player_chips.bet} chips!!!\")\n print(f\"You now have a total of {player_chips.total} chips\")\n else:\n hit_or_stand(dealer_hand, player_hand, deck, player_chips)\n\n while playing:\n while dealer_hand.value < 17:\n dealer_hand.add_card(deck.deal())\n if dealer_hand.value > 21:\n dealer_lost(player_chips)\n show_all_cards(dealer_hand, player_hand)\n break\n elif dealer_hand.value > player_hand.value:\n dealer_won(player_chips)\n show_all_cards(dealer_hand, player_hand)\n break\n elif dealer_hand.value < player_hand.value:\n player_won(player_chips)\n show_all_cards(dealer_hand, player_hand)\n break\n else:\n tie()\n show_all_cards(dealer_hand, player_hand)\n break\n \n play_again = input(\"\\nDo you want to play again?\\nEnter 'y' or 'n'\\n\")\n if play_again[0].lower() == 'y':\n if player_chips.total > 0:\n continue\n else:\n print('Sorry you do not have any chips left.\\nThank you for playing with us!')\n game_session = False\n else:\n game_session = False\n print('Thanks for playing with us')\n",
"Welcome to the game of Blackjack\n\n\nHow much would you like to bet?\n(You have total of 100 chips)\n50\nYou bet 50 chips on this round!\n\nDealer's Card: <Card Hidden> Queen of Hearts\n\nYour Cards:\nQueen of Diamonds\nKing of Hearts\nYour current value: 20\n\nWould you like to Hit or Stand?\nEnter 'h' or 's'\ns\nPlayer wants to stay\n\n\nCongratulation! You WON!!!\nYou won 50 chips\nYou have 150 chips in total.\n\nDealer's Cards:\nFive of Hearts\nQueen of Hearts\nThree of Hearts\nDealer's final value: 18\n\nYour Cards:\nQueen of Diamonds\nKing of Hearts\nYour final value: 20\n\nDo you want to play again?\nEnter 'y' or 'n'\nn\nThanks for playing with us\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a19d4af4f33786efaa62ee7c127149672777916
| 368,170 |
ipynb
|
Jupyter Notebook
|
notebook/Ratio_Space_Demo.ipynb
|
yygr/datascience_utility
|
aa6aa37508e46ab3568805dd1bb514ef10652240
|
[
"MIT"
] | null | null | null |
notebook/Ratio_Space_Demo.ipynb
|
yygr/datascience_utility
|
aa6aa37508e46ab3568805dd1bb514ef10652240
|
[
"MIT"
] | null | null | null |
notebook/Ratio_Space_Demo.ipynb
|
yygr/datascience_utility
|
aa6aa37508e46ab3568805dd1bb514ef10652240
|
[
"MIT"
] | null | null | null | 724.744094 | 112,608 | 0.946201 |
[
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport sys\nsys.path.append('../src')\nfrom ratio_space import ratiospace_division, nCk, origin_vector\nfrom myutils import get_figratio, plot_hist, cumulative_bins",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(2, 3, sharex=True, sharey=True, figsize=(9, 9))\nN = 3\nK = 10\nfor N, _ax in enumerate(ax.flat):\n N += 2\n p = origin_vector(ratiospace_division(N,K))\n if N <= 2:\n _ax.scatter(p, np.zeros(len(p)), 10, 'k')\n else:\n _ax.scatter(p.T[0], p.T[1], 10, 'k', alpha=0.1)\n p = origin_vector(ratiospace_division(N,1))\n poly = plt.Polygon(p[:,:2], alpha=0.2, fc='g')\n _ax.add_patch(poly)\n _ax.set_aspect('equal')\n _ax.set_ylim(-1.1, 1.1)\n _ax.set_xlim(-1.1, 1.1)\nplt.tight_layout()",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(3, 3, figsize=(14, 14))\nN = 6 \nK = 10 \nfor N, _ax in enumerate(ax.flat, start=2):\n p = origin_vector(ratiospace_division(N, 1))\n if N==2:\n _ax.scatter(p, np.zeros(p.shape), 1, 'r')\n _ax.plot(p, np.zeros(p.shape), 'g', alpha=0.3, linewidth=10)\n else:\n _ax.scatter(p.T[0], p.T[1], 1, 'r')\n cons_base = (0.1, 0.9)\n cons = [(0.1, 0.9)]*N\n P = ratiospace_division(N, K, constraints=cons)\n p = origin_vector(P)\n df = pd.DataFrame(np.c_[P, p])\n if N==2:\n _ax.scatter(p, np.zeros(p.shape), 10, 'g', label='1d-Simplex')\n else:\n _ax.scatter(p.T[0], p.T[1], 10, 'g', alpha=0.3, label=f'Constraint:{cons_base}')\n p = origin_vector(ratiospace_division(N,1))\n poly = plt.Polygon(p[:,:2], alpha=0.2, fc='g', label=f'{N-1}d-Simplex')\n _ax.add_patch(poly)\n _ax.legend()\n _ax.set_ylim(-1.1, 1.1)\n _ax.set_xlim(-1.1, 1.1)\n _ax.set_aspect('equal')\nplt.savefig('../image/simplex_grid.png')",
"_____no_output_____"
]
],
[
[
"# Asset Portfolio",
"_____no_output_____"
]
],
[
[
"nb_ex = 50\n\ndic = {\n 'Stock' : (0.05, 0.1),\n 'FX' : (0.05, 0.1),\n 'Deposit' : (0.1, 0.4),\n 'Real Estate' : (0.3, 0.7),\n}\n\nkeys = pd.DataFrame(dic, index=['min', 'max']).sort_values('max', 1).columns\ncons = [dic[x] for x in keys]\n \nfor n, c in zip(keys, cons):\n print(n, c)\n\ngrid_data = ratiospace_division(4, nb_ex, constraints=cons)\nprint('Portion combination', grid_data.shape)\n\nfig, ax = plt.subplots(ncols=len(keys), figsize=(16, 3))\nlabel = np.digitize(grid_data[:,0], np.unique(grid_data[:,0]))-1\nunique_label = np.unique(label)\ncolor = unique_label/unique_label.max()\ncolor = plt.get_cmap('rainbow')(color)\ncolor[:, -1] = 0.6\nfor _ax, data, k in zip(ax.flat, grid_data.T, keys):\n unique_data = len(np.unique(data))\n h, e = np.histogram(data, bins=unique_data)\n title=f'{k} [{len(h)}cut]'\n _ax.set_title(title)\n _btm = np.zeros(len(e[:-1]))\n for l, c in zip(np.unique(label), color):\n idx = np.where(label==l)[0]\n h, _ = np.histogram(data[idx], bins=e)\n _ax.bar(e[:-1], h, e[1:]-e[:-1], _btm, align='edge', fc=c, ec='w')\n _btm += h\n _e = e if len(e)<13 else e[np.linspace(0,len(e)-1,13).astype(int)]\n _ax.set_xticks(_e)\n _ax.set_xticklabels([f'{x:.3f}'for x in _e], rotation=90)\nplt.tight_layout()",
"Stock (0.05, 0.1)\nFX (0.05, 0.1)\nDeposit (0.1, 0.4)\nReal Estate (0.3, 0.7)\nPortion combination (135, 4)\n"
],
[
"p = np.ones((3, 3))\nfor i in nCk(3, 2):\n j = list(set(np.arange(3))-set(i))\n for _i in i:\n p[j, _i] = cons[_i+1][0]\n m = p[j]==1\n p[j, j] -= p[j, i].sum()\n \ndef plot_ratiospace(grid_data, label, p, fname=None):\n ul = np.unique(label)\n nc, nr = get_figratio(len(ul))\n fig, ax = plt.subplots(nr, nc, figsize=(5*nc, 5*nr), sharex=True, sharey=True)\n color = plt.get_cmap('rainbow')(label/label.max())\n for _ax, _ul in zip(ax.flat, ul):\n idx = np.where(_ul==label)[0]\n poly = plt.Polygon(origin_vector(p), alpha=0.1, color='b')\n _p = origin_vector(p)\n _ax.scatter(_p.T[0], _p.T[1], 4, c='r')\n _ax.set_title(f'{keys[0]}:{grid_data[idx][:,0][0]:.0%}')\n for i, _p in enumerate(origin_vector(p)):\n _i = np.argmax(p[i])\n _text = f'{keys[_i+1]}:{p[_i].max():.0%}'\n _ax.text(_p[0], _p[1], _text)\n _ax.add_patch(poly)\n data = origin_vector(grid_data[idx][:,1:])\n _ax.scatter(data.T[0], data.T[1], 10, marker='H', c=color[idx], alpha=0.6)\n _ax.set_aspect('equal')\n if fname:\n fig.suptitle(fname.split('/')[-1])\n plt.savefig(fname)\n plt.close()\n\nplot_ratiospace(grid_data, label, p)",
"_____no_output_____"
],
[
"def gen_matrix(edges, data, value=None, func=[np.mean, np.std], return_labels=False, debug=False):\n labels = np.array([np.digitize(d, e, right=True) for e,d in zip(edges, data)]) -1\n _shape = [len(x)-1 for x in edges]\n _shape.append(len(edges))\n if debug:\n print(_shape)\n matrix = np.zeros(tuple(_shape))\n mask = np.ones(matrix.shape[:-1])\n if not value is None:\n stats = np.zeros(tuple(_shape[:-1]+[len(func)]))\n if debug:\n print(data.shape, matrix.shape, labels.shape, mask.shape)\n check = 0\n for i in zip(*np.where(mask)):\n _idx = np.arange(data.shape[-1])\n for j, k in enumerate(i):\n _tmp = np.where(labels[j][_idx]==k)[0]\n _idx = _idx[_tmp]\n if len(_idx)==0:\n break\n if len(_idx)==0:\n continue\n for j in range(len(i)):\n _data = data[j][_idx]\n if len(_data)>0:\n matrix[i][j] = _data.mean()\n if debug:\n print(i, len(_idx), matrix[i], end='\\n')\n check += len(_idx)\n if value is None:\n continue\n for j, _f in enumerate(func):\n stats[i][j] = _f(value[_idx])\n if debug:\n print(check)\n if return_labels:\n if not value is None:\n return matrix, stats, labels\n return matrix, labels\n if not value is None:\n return matrix, stats\n return matrix",
"_____no_output_____"
]
],
[
[
"# Example",
"_____no_output_____"
]
],
[
[
"from sklearn import datasets as ds",
"_____no_output_____"
],
[
"dic = ds.load_boston()\ndic.keys()",
"_____no_output_____"
],
[
"df = pd.DataFrame(dic.data, columns=dic.feature_names)\nn = df.shape[1]\nnr, nc = get_figratio(n)\nfig, ax = plt.subplots(nc, nr, figsize=(16, 9 ))\nprint(len(ax), df.shape, nr, nc)\ndf.hist(ax=ax.flat[:n])\nfig.tight_layout()",
"3 (506, 13) 5 3\n"
],
[
"base_bins = {\n 'B' : [0, 330, 400],\n 'CHAS' : [0, 0.1, 0.9, 1],\n 'CRIM' : [0, 10, 100],\n 'INDUS' : [0, 15, 40],\n 'RAD' : [0, 12, 30],\n 'TAX' : [100, 500, 600, 800],\n 'ZN' : [0, 10, 100]\n}",
"_____no_output_____"
],
[
"for line in dic.DESCR.split('\\n'):\n print(line)",
"Boston House Prices dataset\n===========================\n\nNotes\n------\nData Set Characteristics: \n\n :Number of Instances: 506 \n\n :Number of Attributes: 13 numeric/categorical predictive\n \n :Median Value (attribute 14) is usually the target\n\n :Attribute Information (in order):\n - CRIM per capita crime rate by town\n - ZN proportion of residential land zoned for lots over 25,000 sq.ft.\n - INDUS proportion of non-retail business acres per town\n - CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)\n - NOX nitric oxides concentration (parts per 10 million)\n - RM average number of rooms per dwelling\n - AGE proportion of owner-occupied units built prior to 1940\n - DIS weighted distances to five Boston employment centres\n - RAD index of accessibility to radial highways\n - TAX full-value property-tax rate per $10,000\n - PTRATIO pupil-teacher ratio by town\n - B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town\n - LSTAT % lower status of the population\n - MEDV Median value of owner-occupied homes in $1000's\n\n :Missing Attribute Values: None\n\n :Creator: Harrison, D. and Rubinfeld, D.L.\n\nThis is a copy of UCI ML housing dataset.\nhttp://archive.ics.uci.edu/ml/datasets/Housing\n\n\nThis dataset was taken from the StatLib library which is maintained at Carnegie Mellon University.\n\nThe Boston house-price data of Harrison, D. and Rubinfeld, D.L. 'Hedonic\nprices and the demand for clean air', J. Environ. Economics & Management,\nvol.5, 81-102, 1978. Used in Belsley, Kuh & Welsch, 'Regression diagnostics\n...', Wiley, 1980. N.B. Various transformations are used in the table on\npages 244-261 of the latter.\n\nThe Boston house-price data has been used in many machine learning papers that address regression\nproblems. \n \n**References**\n\n - Belsley, Kuh & Welsch, 'Regression diagnostics: Identifying Influential Data and Sources of Collinearity', Wiley, 1980. 244-261.\n - Quinlan,R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann.\n - many more! (see http://archive.ics.uci.edu/ml/datasets/Housing)\n\n"
],
[
"_ = plot_hist(dic.target)",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a19f0197ed40e4d1d186dbdceb50c8d69dd68db
| 4,030 |
ipynb
|
Jupyter Notebook
|
notebooks/trino-wri_gppd-demo.ipynb
|
os-climate/data-platform-demo
|
99dfeecc7058479a9f9989efb7a77327b4cd8a22
|
[
"FTL"
] | null | null | null |
notebooks/trino-wri_gppd-demo.ipynb
|
os-climate/data-platform-demo
|
99dfeecc7058479a9f9989efb7a77327b4cd8a22
|
[
"FTL"
] | 39 |
2021-09-09T21:42:19.000Z
|
2022-03-21T15:30:08.000Z
|
notebooks/trino-wri_gppd-demo.ipynb
|
os-climate/data-platform-demo
|
99dfeecc7058479a9f9989efb7a77327b4cd8a22
|
[
"FTL"
] | 2 |
2021-09-16T18:25:23.000Z
|
2021-09-30T22:07:22.000Z
| 25.1875 | 105 | 0.564764 |
[
[
[
"#pip install trino urllib3 pandas",
"_____no_output_____"
],
[
"# two possible apis to generate a trino connection:\nimport trino\n# from pyhive import presto\n\n# pandas dfs\nimport pandas as pd\n\nimport urllib3\nurllib3.disable_warnings()",
"_____no_output_____"
],
[
"from dotenv import dotenv_values, load_dotenv\nimport os\nimport pathlib\n\ndotenv_dir = os.environ.get('CREDENTIAL_DOTENV_DIR', os.environ.get('PWD', '/opt/app-root/src'))\ndotenv_path = pathlib.Path(dotenv_dir) / 'credentials.env'\nif os.path.exists(dotenv_path):\n load_dotenv(dotenv_path=dotenv_path,override=True)",
"_____no_output_____"
],
[
"JWT_TOKEN = os.environ['TRINO_PASSWD']\nconn = trino.dbapi.connect(\n host=os.environ['TRINO_HOST'],\n port=int(os.environ['TRINO_PORT']),\n user=os.environ['TRINO_USER'],\n http_scheme='https',\n auth=trino.auth.JWTAuthentication(JWT_TOKEN),\n verify=True,\n)\ncur = conn.cursor()",
"_____no_output_____"
],
[
"cur.execute('show catalogs')\ncur.fetchall()",
"_____no_output_____"
],
[
"# Show available schemas to ensure trino connection is set correctly\ncur.execute('show schemas in osc_datacommons_dev')\ncur.fetchall()",
"_____no_output_____"
],
[
"cur.execute('show tables from osc_datacommons_dev.wri_gppd')\ncur.fetchall()",
"_____no_output_____"
],
[
"# CNAME is Catalog Name (osc_datacommons_dev)\n# SNAME is Schema Name (wri_gppd)\n# TNAME is Table Name (plants)\n\ndef trino_to_df(cname, sname, tname):\n trino_table = '.'.join([cname, sname, tname])\n cur.execute('show columns from ' + trino_table)\n columns_df = pd.DataFrame(cur.fetchall()).dropna(axis=1,how='all')\n \n cur.execute('select * from ' + trino_table)\n df = pd.DataFrame(cur.fetchall())\n df.columns = columns_df.iloc[:, 0]\n return df",
"_____no_output_____"
],
[
"df = trino_to_df('osc_datacommons_dev', 'wri_gppd', 'plants')\ndisplay(df.info(verbose=True))\ndf.head()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a1a2f01cd7432afad7da73f37e292161bdb404d
| 161,106 |
ipynb
|
Jupyter Notebook
|
Deep Learning-SEMICOLON/2. Data Analytics/Perceptron and Gradient Descent.ipynb
|
Ashleshk/Machine-Learning-Data-Science-Deep-Learning
|
03357ab98155bf73b8f1d2fd53255cc16bea2333
|
[
"MIT"
] | 1 |
2020-05-24T06:55:31.000Z
|
2020-05-24T06:55:31.000Z
|
Deep Learning-SEMICOLON/2. Data Analytics/Perceptron and Gradient Descent.ipynb
|
Ashleshk/Machine-Learning-Data-Science-Deep-Learning
|
03357ab98155bf73b8f1d2fd53255cc16bea2333
|
[
"MIT"
] | null | null | null |
Deep Learning-SEMICOLON/2. Data Analytics/Perceptron and Gradient Descent.ipynb
|
Ashleshk/Machine-Learning-Data-Science-Deep-Learning
|
03357ab98155bf73b8f1d2fd53255cc16bea2333
|
[
"MIT"
] | null | null | null | 396.812808 | 88,090 | 0.932988 |
[
[
[
"### Perceptron",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image\nImage(\"perceptron.png\")",
"_____no_output_____"
],
[
"### Perceptron Training ",
"_____no_output_____"
],
[
"Image(\"perceptrontraining.png\")",
"_____no_output_____"
]
],
[
[
"# Gradient Descent ",
"_____no_output_____"
],
[
"### 1. Initialize the weights with random values. \n### 2. Choose a learning rate between 0 to 1.\n### 3. Till the error is almost constant: \n#### </t> 3.1 calculate change in weight ▲w\n#### </t> 3.2 update the weight ",
"_____no_output_____"
]
],
[
[
"Image('gd.png')",
"_____no_output_____"
],
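[
"# A minimal gradient-descent sketch of the steps above (illustration only,\n# not part of the original notebook): fit w in y = w*x by repeatedly applying\n# w <- w + lr * mean(error * x) until the error is almost constant.\nimport numpy as np\nx = np.array([1.0, 2.0, 3.0, 4.0])\ny = 2.0 * x                            # target: the true weight is 2\nw, lr = np.random.rand(), 0.1          # steps 1-2: random weight, learning rate in (0, 1)\nfor _ in range(100):                   # step 3: iterate until the error stabilizes\n    error = y - w * x\n    delta_w = lr * (error * x).mean()  # step 3.1: change in weight (negative MSE gradient)\n    w = w + delta_w                    # step 3.2: update the weight\nprint(w)                               # converges close to 2",
"_____no_output_____"
],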
[
"import numpy as np\nimport pandas as pd \nfrom matplotlib import pyplot as plt\nfrom sklearn.linear_model import perceptron\nfrom sklearn.model_selection import train_test_split\n%matplotlib inline ",
"_____no_output_____"
],
[
"data=pd.read_csv('mnist.csv')",
"_____no_output_____"
],
[
"df_x=data.iloc[:,1:]\ndf_y=data.iloc[:,0]",
"_____no_output_____"
],
[
"x_train, x_test, y_train, y_test = train_test_split(df_x, df_y, test_size=0.2, random_state=4)",
"_____no_output_____"
],
[
"per=Perceptron()",
"_____no_output_____"
],
[
"per.fit(x_train,y_train)",
"_____no_output_____"
],
[
"pred=per.predict(x_test)",
"_____no_output_____"
],
[
"pred",
"_____no_output_____"
],
[
"a=y_test.values",
"_____no_output_____"
],
[
"a\ncount=0",
"_____no_output_____"
],
[
"for i in range(len(pred)):\n if pred[i]==a[i]:\n count=count+1",
"_____no_output_____"
],
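[
"# Equivalent vectorized accuracy check (illustrative alternative to the loop above)\nprint((pred == a).mean())",
"_____no_output_____"
],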
[
"count",
"_____no_output_____"
],
[
"len(pred)",
"_____no_output_____"
],
[
"7224/8400.0",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a1a30ec832db05ed786953c098021fbf265a349
| 3,759 |
ipynb
|
Jupyter Notebook
|
README.ipynb
|
shubhamtyagii/MPC--CarND
|
c1354688e463411cb514b463e36ad9134b6ed3b3
|
[
"MIT"
] | null | null | null |
README.ipynb
|
shubhamtyagii/MPC--CarND
|
c1354688e463411cb514b463e36ad9134b6ed3b3
|
[
"MIT"
] | null | null | null |
README.ipynb
|
shubhamtyagii/MPC--CarND
|
c1354688e463411cb514b463e36ad9134b6ed3b3
|
[
"MIT"
] | null | null | null | 37.969697 | 347 | 0.564246 |
[
[
[
"\n## Udacity SDCND - Term 2: MPC Project ##\n\n### I. The Model\n\nI have used **classroom model**.\n\ni. State\n - x: position in x direction\n - y: position in y direction\n - psi: steering angle\n - v: velocity of the car\n - cte: cross-track error along the y axis\n - epsi: error in the steering angle\n \nii. Actuators\n - delta: applied steering angle\n - a: applied throttle\n \niii. Update Equations\n - x<sub>t</sub> = x<sub>t-1</sub> \\* v<sub>t-1</sub> \\* cos(psi<sub>t-1</sub>) \\* dt\n - y<sub>t</sub> = y<sub>t-1</sub> \\* v<sub>t-1</sub> \\* sin(psi<sub>t-1</sub>) \\* dt\n - psi<sub>t</sub> = psi<sub>t-1</sub> + (v<sub>t-1</sub>/Lf) \\* delta<sub>t-1</sub> \\* dt\n - v<sub>t</sub> = v<sub>t-1</sub> + a<sub>t-1</sub> + dt\n - cte<sub>t</sub> = (f<sub>t-1</sub> - y<sub>t-1</sub>) + (v<sub>t-1</sub> \\* sin(epsi<sub>t-1</sub>) \\* dt)\n - epsi<sub>t</sub> = ((psi<sub>t-1</sub> - psides<sub>t-1</sub>) - ((v<sub>t-1</sub>/Lf) \\* delta<sub>t-1</sub> \\* dt))\n\n**f** is the value of the 3rd degree polynomial representing the reference line at the current value of x. \n**psides** is the desired psi, which is the tangential angle of the derivative of the polynomial at that point.\n\n### II. Timestep Length and Elapsed Duration\n\nThe final values chosen are **N=10** and **dt=0.1**.\n\nIf the value of N is too small, we cannot predict the future well. If value is too large then we may plan for a long future which not be what we are expecting. The values for N and dt are 10 and 0.1 respectively. These values were just a part of hit and trial process. I tested with 7/0.5; 9,0.25; 18,0.05 also in order to fix 10 and 0.1.\n\n### III. Polynomial Fitting\n\nThe waypoint co-ordinates received from the simulator are first converted into cars co-ordinate system where car is the origin, I have done it in `Main.cpp::Lines 104 - 114`. \nThe converted co-ordinates are fit to a polynomial at `Main.cpp::Lines 124` using the polyfit method.\n\n### IV. Model Predictive Control with Latency\n\nIn order to account for the 100 ms latency, the initial state of the car supplied by the simulator is updated using the same model descibed above. \nHere, the **latency** period is used as the time gap **dt**.\n\nBelow is the code block from `Main.cpp`.\n\n```cpp\nconst double current_px = 0.0 + v * act_latency;\nconst double current_py = 0.0;\nconst double current_psi = 0.0 + v * (-delta) / Lf * act_latency;\nconst double current_v = v + a * act_latency;\nconst double current_cte = cte + v * sin(epsi) * act_latency;\nconst double current_epsi = epsi + v * (-delta) / Lf * act_latency;\n```\n",
"_____no_output_____"
]
]
] |
[
"markdown"
] |
[
[
"markdown"
]
] |
4a1a3ad5b6b391e2242a783d32d1eb2cddd5e51b
| 114,219 |
ipynb
|
Jupyter Notebook
|
notebooks/Dataset A - Cardio/Synthetic data evaluation/Resemblance/2_Multivariate_Resemblance_DatasetA.ipynb
|
Vicomtech/STDG-evaluation-metrics
|
4662c2cc60f7941723a876a6032b411e40f5ec62
|
[
"MIT"
] | 4 |
2021-08-20T18:21:09.000Z
|
2022-01-12T09:30:29.000Z
|
notebooks/Dataset A - Cardio/Synthetic data evaluation/Resemblance/2_Multivariate_Resemblance_DatasetA.ipynb
|
Vicomtech/STDG-evaluation-metrics
|
4662c2cc60f7941723a876a6032b411e40f5ec62
|
[
"MIT"
] | null | null | null |
notebooks/Dataset A - Cardio/Synthetic data evaluation/Resemblance/2_Multivariate_Resemblance_DatasetA.ipynb
|
Vicomtech/STDG-evaluation-metrics
|
4662c2cc60f7941723a876a6032b411e40f5ec62
|
[
"MIT"
] | null | null | null | 150.09067 | 40,928 | 0.751679 |
[
[
[
"# Multivariate Resemblance Analysis (MRA) Dataset A\nIn this notebook the multivariate resemblance analysis of Dataset A is performed for all STDG approaches.",
"_____no_output_____"
]
],
[
[
"#import libraries\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport os\nprint('Libraries imported!!')",
"Libraries imported!!\n"
],
[
"#define directory of functions and actual directory\nHOME_PATH = '' #home directory of the project\nFUNCTIONS_DIR = 'EVALUATION FUNCTIONS/RESEMBLANCE'\nACTUAL_DIR = os.getcwd()\n\n#change directory to functions directory\nos.chdir(HOME_PATH + FUNCTIONS_DIR)\n\n#import functions for univariate resemblance analisys\nfrom multivariate_resemblance import get_numerical_correlations\nfrom multivariate_resemblance import plot_correlations\nfrom multivariate_resemblance import get_categorical_correlations\nfrom multivariate_resemblance import compute_mra_score\n\n#change directory to actual directory\nos.chdir(ACTUAL_DIR)\nprint('Functions imported!!')",
"Functions imported!!\n"
]
],
[
[
"## 1. Read real and synthetic datasets\nIn this part real and synthetic datasets are read.",
"_____no_output_____"
]
],
[
[
"#Define global variables\nDATA_TYPES = ['Real','GM','SDV','CTGAN','WGANGP']\nSYNTHESIZERS = ['GM','SDV','CTGAN','WGANGP']\nFILEPATHS = {'Real' : HOME_PATH + 'REAL DATASETS/TRAIN DATASETS/A_Diabetes_Data_Real_Train.csv',\n 'GM' : HOME_PATH + 'SYNTHETIC DATASETS/GM/A_Diabetes_Data_Synthetic_GM.csv',\n 'SDV' : HOME_PATH + 'SYNTHETIC DATASETS/SDV/A_Diabetes_Data_Synthetic_SDV.csv',\n 'CTGAN' : HOME_PATH + 'SYNTHETIC DATASETS/CTGAN/A_Diabetes_Data_Synthetic_CTGAN.csv',\n 'WGANGP' : HOME_PATH + 'SYNTHETIC DATASETS/WGANGP/A_Diabetes_Data_Synthetic_WGANGP.csv'}\ncategorical_columns = ['gender','age','admission_type_id','discharge_disposition_id','admission_source_id','max_glu_serum',\n 'A1Cresult','change','diabetesMed','readmitted']\ndata = dict()",
"_____no_output_____"
],
[
"#iterate over all datasets filepaths and read each dataset\nfor name, path in FILEPATHS.items() :\n data[name] = pd.read_csv(path)\n for col in categorical_columns :\n data[name][col] = data[name][col].astype('category')\ndata",
"_____no_output_____"
]
],
[
[
"## 2. Plot PPC matrixes and calculate matrixes norms",
"_____no_output_____"
]
],
[
[
"#compute correlation matrixes for all datasets\ncors_numerical = dict()\nnorms_numerical = dict()\nfor name in DATA_TYPES :\n cors_numerical[name], norms_numerical[name] = get_numerical_correlations(data[name])\nnorms_numerical",
"_____no_output_____"
],
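[
"# compute_mra_score is imported from the project's evaluation functions, so its\n# exact definition is not shown in this notebook. A plausible sketch (purely\n# hypothetical, may differ from the real implementation) compares the real and\n# synthetic correlation matrices via the Frobenius norm of their difference:\ndef compute_mra_score_sketch(real_corr, synth_corr):\n    real_corr, synth_corr = np.asarray(real_corr), np.asarray(synth_corr)\n    diff = np.linalg.norm(real_corr - synth_corr)  # Frobenius norm by default\n    return round(float(1 - diff / np.linalg.norm(real_corr)), 4)  # 1 means identical\n\n# e.g. compute_mra_score_sketch(cors_numerical['Real'], cors_numerical['GM'])",
"_____no_output_____"
],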
[
"fig, axs = plt.subplots(nrows=1, ncols=5, figsize=(15, 2.5))\naxs_idxs = range(6)\nidx = dict(zip(DATA_TYPES,axs_idxs))\n\nfor name_idx, name in enumerate(DATA_TYPES) :\n ax = axs[idx[name]]\n matrix = cors_numerical[name]\n \n if name_idx != len(DATA_TYPES) - 1:\n plot_correlations(matrix, ax, color_bar=False)\n else:\n plot_correlations(matrix, ax, color_bar=True)\n \n if name_idx > 0:\n ax.set_yticks([]) \n \n if name == 'Real' :\n ax.set_title(name)\n else :\n score = compute_mra_score(cors_numerical['Real'], matrix)\n ax.set_title(name + ' (' + str(score) + ')')\n \nfig.savefig('MULTIVARIATE RESEMBLANCE RESULTS/PPC_Matrices.svg', bbox_inches='tight')",
"_____no_output_____"
]
],
[
[
"## 3. Plot correlations for categorical variables and calculate matrixes norms",
"_____no_output_____"
]
],
[
[
"#compute correlation matrixes for all datasets\ncors_categorical = dict()\nnorms_categorical = dict()\nfor name in DATA_TYPES :\n cors_categorical[name], norms_categorical[name] = get_categorical_correlations(data[name])\nnorms_categorical",
"_____no_output_____"
],
[
"fig, axs = plt.subplots(nrows=1, ncols=5, figsize=(15, 2.5))\naxs_idxs = range(6)\nidx = dict(zip(DATA_TYPES,axs_idxs))\nfirst = True\n\nfor name_idx, name in enumerate(DATA_TYPES) :\n ax = axs[idx[name]]\n matrix = cors_categorical[name]\n \n if name_idx != len(DATA_TYPES) - 1:\n plot_correlations(matrix, ax, color_bar=False)\n else:\n plot_correlations(matrix, ax, color_bar=True)\n \n if name_idx > 0:\n ax.set_yticks([]) \n\n if name == 'Real' :\n ax.set_title(name)\n else :\n score = compute_mra_score(cors_categorical['Real'], matrix)\n ax.set_title(name + ' (' + str(score) + ')')\n \nfig.savefig('MULTIVARIATE RESEMBLANCE RESULTS/Categorical_Matrices.svg', bbox_inches='tight')",
"_____no_output_____"
]
],
[
[
"## 4. Explore the results",
"_____no_output_____"
]
],
[
[
"norms_numerical",
"_____no_output_____"
],
[
"norms_categorical",
"_____no_output_____"
],
[
"norms_data = [np.asarray(list(norms_numerical.values())), np.asarray(list(norms_categorical.values()))]\ndf_norms = pd.DataFrame(data=norms_data, columns=DATA_TYPES, index=['PPC_MATRIX_NORMS','CATEGORICAL_CORS_MATRIX_NORMS'])\ndf_norms.to_csv('MULTIVARIATE RESEMBLANCE RESULTS/Correlation_Matrix_Norms.csv')\ndf_norms",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a1a454d19f2a07f9c95c293a64f2cb1a9b444a6
| 764,360 |
ipynb
|
Jupyter Notebook
|
QCHack_Qiskit_Pusle_higher_energy_states.ipynb
|
mingweii/QCHack2021
|
2ec6b42b8c567ffd0fc3f603e338b28b7af9f926
|
[
"MIT"
] | 3 |
2021-04-11T17:48:26.000Z
|
2021-04-30T05:12:46.000Z
|
QCHack_Qiskit_Pusle_higher_energy_states.ipynb
|
wslu42/QCHack2021
|
2ec6b42b8c567ffd0fc3f603e338b28b7af9f926
|
[
"MIT"
] | null | null | null |
QCHack_Qiskit_Pusle_higher_energy_states.ipynb
|
wslu42/QCHack2021
|
2ec6b42b8c567ffd0fc3f603e338b28b7af9f926
|
[
"MIT"
] | 1 |
2021-04-16T02:19:40.000Z
|
2021-04-16T02:19:40.000Z
| 302.357595 | 64,236 | 0.924156 |
[
[
[
"from qiskit.tools.jupyter import *\n\nfrom qiskit import IBMQ\nIBMQ.load_account()\n#provider = IBMQ.get_provider(hub='ibm-q', group='open', project='main')\nprovider=IBMQ.get_provider(hub='ibm-q-research', group='uni-maryland-1', project='main')\nbackend = provider.get_backend('ibmq_armonk')",
"_____no_output_____"
],
[
"backend_config = backend.configuration()\nassert backend_config.open_pulse, \"Backend doesn't support Pulse\"",
"_____no_output_____"
],
[
"dt = backend_config.dt\nprint(f\"Sampling time: {dt*1e9} ns\") # The configuration returns dt in seconds, so multiply by\n # 1e9 to get nanoseconds",
"Sampling time: 0.2222222222222222 ns\n"
],
[
"backend_defaults = backend.defaults()",
"_____no_output_____"
],
[
"import numpy as np\n\n# unit conversion factors -> all backend properties returned in SI (Hz, sec, etc)\nGHz = 1.0e9 # Gigahertz\nMHz = 1.0e6 # Megahertz\nus = 1.0e-6 # Microseconds\nns = 1.0e-9 # Nanoseconds\n\n# We will find the qubit frequency for the following qubit.\nqubit = 0\n\n# The sweep will be centered around the estimated qubit frequency.\ncenter_frequency_Hz = backend_defaults.qubit_freq_est[qubit] # The default frequency is given in Hz\n # warning: this will change in a future release\nprint(f\"Qubit {qubit} has an estimated frequency of {center_frequency_Hz / GHz} GHz.\")\n\n# scale factor to remove factors of 10 from the data\nscale_factor = 1e-14\n\n# We will sweep 40 MHz around the estimated frequency\nfrequency_span_Hz = 20 * MHz\n# in steps of 1 MHz.\nfrequency_step_Hz = 1 * MHz\n\n# We will sweep 20 MHz above and 20 MHz below the estimated frequency\nfrequency_min = center_frequency_Hz - frequency_span_Hz / 2\nfrequency_max = center_frequency_Hz + frequency_span_Hz / 2\n# Construct an np array of the frequencies for our experiment\nfrequencies_GHz = np.arange(frequency_min / GHz, \n frequency_max / GHz, \n frequency_step_Hz / GHz)\n\nprint(f\"The sweep will go from {frequency_min / GHz} GHz to {frequency_max / GHz} GHz \\\nin steps of {frequency_step_Hz / MHz} MHz.\")",
"Qubit 0 has an estimated frequency of 4.971855969373953 GHz.\nThe sweep will go from 4.961855969373953 GHz to 4.981855969373953 GHz in steps of 1.0 MHz.\n"
],
[
"# number of shots for our experiments\nNUM_SHOTS = 1024",
"_____no_output_____"
],
[
"# samples need to be multiples of 16\ndef get_closest_multiple_of_16(num):\n return int(num + 8 ) - (int(num + 8 ) % 16)",
"_____no_output_____"
],
[
"from qiskit import pulse # This is where we access all of our Pulse features!\nfrom qiskit.pulse import Play\n# This Pulse module helps us build sampled pulses for common pulse shapes\nfrom qiskit.pulse import library as pulse_lib\n\n\n# Drive pulse parameters (us = microseconds)\ndrive_sigma_us = 0.075 # This determines the actual width of the gaussian\ndrive_samples_us = drive_sigma_us*8 # This is a truncating parameter, because gaussians don't have \n # a natural finite length\n\ndrive_sigma = get_closest_multiple_of_16(drive_sigma_us * us /dt) # The width of the gaussian in units of dt\ndrive_samples = get_closest_multiple_of_16(drive_samples_us * us /dt) # The truncating parameter in units of dt\ndrive_amp = 0.05\n# Drive pulse samples\ndrive_pulse = pulse_lib.gaussian(duration=drive_samples,\n sigma=drive_sigma,\n amp=drive_amp,\n name='freq_sweep_excitation_pulse')",
"_____no_output_____"
],
[
"# Find out which group of qubits need to be acquired with this qubit\nmeas_map_idx = None\nfor i, measure_group in enumerate(backend_config.meas_map):\n if qubit in measure_group:\n meas_map_idx = i\n break\nassert meas_map_idx is not None, f\"Couldn't find qubit {qubit} in the meas_map!\"",
"_____no_output_____"
],
[
"inst_sched_map = backend_defaults.instruction_schedule_map\nmeasure = inst_sched_map.get('measure', qubits=backend_config.meas_map[meas_map_idx])",
"_____no_output_____"
],
[
"### Collect the necessary channels\ndrive_chan = pulse.DriveChannel(qubit)\nmeas_chan = pulse.MeasureChannel(qubit)\nacq_chan = pulse.AcquireChannel(qubit)",
"_____no_output_____"
],
[
"# Create the base schedule\n# Start with drive pulse acting on the drive channel\nschedule = pulse.Schedule(name='Frequency sweep')\nschedule += Play(drive_pulse, drive_chan)\n# The left shift `<<` is special syntax meaning to shift the start time of the schedule by some duration\nschedule += measure << schedule.duration\n\n# Create the frequency settings for the sweep (MUST BE IN HZ)\nfrequencies_Hz = frequencies_GHz*GHz\nschedule_frequencies = [{drive_chan: freq} for freq in frequencies_Hz]",
"_____no_output_____"
],
[
"schedule.draw(label=True)",
"/Users/mingtsoweii/miniconda3/lib/python3.8/site-packages/qiskit/pulse/schedule.py:1534: DeprecationWarning: Legacy pulse drawer is deprecated. Specified arguments label are deprecated. Please check the API document of new pulse drawer `qiskit.visualization.pulse_drawer_v2`.\n warnings.warn('Legacy pulse drawer is deprecated. '\n"
],
[
"from qiskit import assemble\n\nnum_shots_per_frequency = 1024\nfrequency_sweep_program = assemble(schedule,\n backend=backend, \n meas_level=1,\n meas_return='avg',\n shots=num_shots_per_frequency,\n schedule_los=schedule_frequencies)",
"_____no_output_____"
],
[
"job = backend.run(frequency_sweep_program)",
"<ipython-input-14-b1426b634c8f>:1: DeprecationWarning: Passing a Qobj to Backend.run is deprecated and will be removed in a future release. Please pass in circuits or pulse schedules instead.\n job = backend.run(frequency_sweep_program)\n"
],
[
"print(job.job_id())\nfrom qiskit.tools.monitor import job_monitor\njob_monitor(job)",
"60726dd6159ff958bc3f076d\nJob Status: job has successfully run\n"
],
[
"frequency_sweep_results = job.result(timeout=120) # timeout parameter set to 120 seconds",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\n\nsweep_values = []\nfor i in range(len(frequency_sweep_results.results)):\n # Get the results from the ith experiment\n res = frequency_sweep_results.get_memory(i)*scale_factor\n # Get the results for `qubit` from this experiment\n sweep_values.append(res[qubit])\n\nplt.scatter(frequencies_GHz, np.real(sweep_values), color='black') # plot real part of sweep values\nplt.xlim([min(frequencies_GHz), max(frequencies_GHz)])\nplt.xlabel(\"Frequency [GHz]\")\nplt.ylabel(\"Measured signal [a.u.]\")\nplt.show()",
"_____no_output_____"
],
[
"from scipy.optimize import curve_fit\n\ndef fit_function(x_values, y_values, function, init_params):\n fitparams, conv = curve_fit(function, x_values, y_values, init_params)\n y_fit = function(x_values, *fitparams)\n \n return fitparams, y_fit",
"_____no_output_____"
],
[
"fit_params, y_fit = fit_function(frequencies_GHz,\n np.real(sweep_values), \n lambda x, A, q_freq, B, C: (A / np.pi) * (B / ((x - q_freq)**2 + B**2)) + C,\n [-5, 4.975, 1, 5] # initial parameters for curve_fit\n )",
"_____no_output_____"
],
[
"plt.scatter(frequencies_GHz, np.real(sweep_values), color='black')\nplt.plot(frequencies_GHz, y_fit, color='red')\nplt.xlim([min(frequencies_GHz), max(frequencies_GHz)])\n\nplt.xlabel(\"Frequency [GHz]\")\nplt.ylabel(\"Measured Signal [a.u.]\")\nplt.show()",
"_____no_output_____"
],
[
"A, rough_qubit_frequency, B, C = fit_params\nrough_qubit_frequency = rough_qubit_frequency*GHz # make sure qubit freq is in Hz\nprint(f\"We've updated our qubit frequency estimate from \"\n f\"{round(backend_defaults.qubit_freq_est[qubit] / GHz, 8)} GHz to {round(rough_qubit_frequency/GHz, 8)} GHz.\")",
"We've updated our qubit frequency estimate from 4.97185597 GHz to 4.97180093 GHz.\n"
],
[
"# This experiment uses these values from the previous experiment:\n # `qubit`,\n # `measure`, and\n # `rough_qubit_frequency`.\n\n# Rabi experiment parameters\nnum_rabi_points = 50\n\n# Drive amplitude values to iterate over: 50 amplitudes evenly spaced from 0 to 0.75\ndrive_amp_min = 0\ndrive_amp_max = 0.75\ndrive_amps = np.linspace(drive_amp_min, drive_amp_max, num_rabi_points)",
"_____no_output_____"
],
[
"# Build the Rabi experiments:\n# A drive pulse at the qubit frequency, followed by a measurement,\n# where we vary the drive amplitude each time.\nrabi_schedules = []\nfor drive_amp in drive_amps:\n rabi_pulse = pulse_lib.gaussian(duration=drive_samples, amp=drive_amp, \n sigma=drive_sigma, name=f\"Rabi drive amplitude = {drive_amp}\")\n this_schedule = pulse.Schedule(name=f\"Rabi drive amplitude = {drive_amp}\")\n this_schedule += Play(rabi_pulse, drive_chan)\n # Reuse the measure instruction from the frequency sweep experiment\n this_schedule += measure << this_schedule.duration\n rabi_schedules.append(this_schedule)",
"_____no_output_____"
],
[
"rabi_schedules[-1].draw(label=True)",
"/Users/mingtsoweii/miniconda3/lib/python3.8/site-packages/qiskit/pulse/schedule.py:1534: DeprecationWarning: Legacy pulse drawer is deprecated. Specified arguments label are deprecated. Please check the API document of new pulse drawer `qiskit.visualization.pulse_drawer_v2`.\n warnings.warn('Legacy pulse drawer is deprecated. '\n"
],
[
"# Assemble the schedules into a Qobj\nnum_shots_per_point = 1024\n\nrabi_experiment_program = assemble(rabi_schedules,\n backend=backend,\n meas_level=1,\n meas_return='avg',\n shots=num_shots_per_point,\n schedule_los=[{drive_chan: rough_qubit_frequency}]\n * num_rabi_points)",
"_____no_output_____"
],
[
"print(job.job_id())\njob = backend.run(rabi_experiment_program)\njob_monitor(job)",
"60726ebb87332535ebe5ce4d\n"
],
[
"rabi_results = job.result(timeout=120)",
"_____no_output_____"
],
[
"# center data around 0\ndef baseline_remove(values):\n return np.array(values) - np.mean(values)",
"_____no_output_____"
],
[
"rabi_values = []\nfor i in range(num_rabi_points):\n # Get the results for `qubit` from the ith experiment\n rabi_values.append(rabi_results.get_memory(i)[qubit]*scale_factor)\n\nrabi_values = np.real(baseline_remove(rabi_values))\n\nplt.xlabel(\"Drive amp [a.u.]\")\nplt.ylabel(\"Measured signal [a.u.]\")\nplt.scatter(drive_amps, rabi_values, color='black') # plot real part of Rabi values\nplt.show()",
"_____no_output_____"
],
[
"fit_params, y_fit = fit_function(drive_amps,\n rabi_values, \n lambda x, A, B, drive_period, phi: (A*np.cos(2*np.pi*x/drive_period - phi) + B),\n [4, -4, 0.4, 2*np.pi])\n\nplt.scatter(drive_amps, rabi_values, color='black')\nplt.plot(drive_amps, y_fit, color='red')\n\ndrive_period = fit_params[2] # get period of rabi oscillation\n\nplt.axvline(drive_period/2, color='red', linestyle='--')\nplt.axvline(drive_period, color='red', linestyle='--')\nplt.annotate(\"\", xy=(drive_period, 0), xytext=(drive_period/2,0), arrowprops=dict(arrowstyle=\"<->\", color='red'))\nplt.annotate(\"$\\pi$\", xy=(drive_period/2-0.03, 0.1), color='red')\n\nplt.xlabel(\"Drive amp [a.u.]\", fontsize=15)\nplt.ylabel(\"Measured signal [a.u.]\", fontsize=15)\nplt.show()",
"_____no_output_____"
],
[
"pi_amp_01 = abs(drive_period / 2)\nprint(f\"Pi Amplitude = {pi_amp_01}\")",
"Pi Amplitude = 0.14096173612665405\n"
],
[
"pi_pulse_01 = pulse_lib.gaussian(duration=drive_samples,\n amp=pi_amp_01, \n sigma=drive_sigma,\n name='pi_pulse_01')",
"_____no_output_____"
],
[
"# Create the two schedules\n\n# Ground state schedule\nzero_schedule = pulse.Schedule(name=\"zero schedule\")\nzero_schedule |= measure\n\n# Excited state schedule\none_schedule = pulse.Schedule(name=\"one schedule\")\none_schedule |= pulse.Play(pi_pulse_01, drive_chan) \none_schedule |= measure << one_schedule.duration",
"_____no_output_____"
],
[
"zero_schedule.draw(label=True)",
"/Users/mingtsoweii/miniconda3/lib/python3.8/site-packages/qiskit/pulse/schedule.py:1534: DeprecationWarning: Legacy pulse drawer is deprecated. Specified arguments label are deprecated. Please check the API document of new pulse drawer `qiskit.visualization.pulse_drawer_v2`.\n warnings.warn('Legacy pulse drawer is deprecated. '\n"
],
[
"one_schedule.draw(label=True)",
"/Users/mingtsoweii/miniconda3/lib/python3.8/site-packages/qiskit/pulse/schedule.py:1534: DeprecationWarning: Legacy pulse drawer is deprecated. Specified arguments label are deprecated. Please check the API document of new pulse drawer `qiskit.visualization.pulse_drawer_v2`.\n warnings.warn('Legacy pulse drawer is deprecated. '\n"
],
[
"# Assemble the schedules into a program\nIQ_01_program = assemble([zero_schedule, one_schedule],\n backend=backend,\n meas_level=1,\n meas_return='single',\n shots=NUM_SHOTS,\n schedule_los=[{drive_chan: rough_qubit_frequency}] * 2)",
"_____no_output_____"
],
[
"IQ_01_job = backend.run(IQ_01_program)",
"<ipython-input-65-14e25bfea407>:1: DeprecationWarning: Passing a Qobj to Backend.run is deprecated and will be removed in a future release. Please pass in circuits or pulse schedules instead.\n IQ_01_job = backend.run(IQ_01_program)\n"
],
[
"print(IQ_01_job.job_id())\njob_monitor(IQ_01_job)",
"60727462159ff9dd7c3f07a2\nJob Status: job has successfully run\n"
],
[
"def get_job_data(job, average):\n \"\"\"Retrieve data from a job that has already run.\n Args:\n job (Job): The job whose data you want.\n average (bool): If True, gets the data assuming data is an average.\n If False, gets the data assuming it is for single shots.\n Return:\n list: List containing job result data. \n \"\"\"\n job_results = job.result(timeout=120) # timeout parameter set to 120 s\n result_data = []\n for i in range(len(job_results.results)):\n if average: # get avg data\n result_data.append(job_results.get_memory(i)[qubit]*scale_factor) \n else: # get single data\n result_data.append(job_results.get_memory(i)[:, qubit]*scale_factor) \n return result_data\n\ndef get_closest_multiple_of_16(num):\n \"\"\"Compute the nearest multiple of 16. Needed because pulse enabled devices require \n durations which are multiples of 16 samples.\n \"\"\"\n return (int(num) - (int(num)%16))",
"_____no_output_____"
],
[
"# Get job data (single); split for zero and one\nIQ_01_data = get_job_data(IQ_01_job, average=False)\nzero_data = IQ_01_data[0]\none_data = IQ_01_data[1]",
"_____no_output_____"
],
[
"def IQ_01_plot(x_min, x_max, y_min, y_max):\n \"\"\"Helper function for plotting IQ plane for |0>, |1>. Limits of plot given\n as arguments.\"\"\"\n # zero data plotted in blue\n plt.scatter(np.real(zero_data), np.imag(zero_data), \n s=5, cmap='viridis', c='blue', alpha=0.5, label=r'$|0\\rangle$')\n # one data plotted in red\n plt.scatter(np.real(one_data), np.imag(one_data), \n s=5, cmap='viridis', c='red', alpha=0.5, label=r'$|1\\rangle$')\n\n # Plot a large dot for the average result of the zero and one states.\n mean_zero = np.mean(zero_data) # takes mean of both real and imaginary parts\n mean_one = np.mean(one_data)\n plt.scatter(np.real(mean_zero), np.imag(mean_zero), \n s=200, cmap='viridis', c='black',alpha=1.0)\n plt.scatter(np.real(mean_one), np.imag(mean_one), \n s=200, cmap='viridis', c='black',alpha=1.0)\n \n plt.xlim(x_min, x_max)\n plt.ylim(y_min,y_max)\n plt.legend()\n plt.ylabel('I [a.u.]', fontsize=15)\n plt.xlabel('Q [a.u.]', fontsize=15)\n plt.title(\"0-1 discrimination\", fontsize=15)",
"_____no_output_____"
],
[
"x_min = -25\nx_max = -5\ny_min = -25\ny_max = 0\nIQ_01_plot(x_min, x_max, y_min, y_max)",
"_____no_output_____"
],
[
"def reshape_complex_vec(vec):\n \"\"\"Take in complex vector vec and return 2d array w/ real, imag entries. This is needed for the learning.\n Args:\n vec (list): complex vector of data\n Returns:\n list: vector w/ entries given by (real(vec], imag(vec))\n \"\"\"\n length = len(vec)\n vec_reshaped = np.zeros((length, 2))\n for i in range(len(vec)):\n vec_reshaped[i]=[np.real(vec[i]), np.imag(vec[i])]\n return vec_reshaped",
"_____no_output_____"
],
[
"# Create IQ vector (split real, imag parts)\nzero_data_reshaped = reshape_complex_vec(zero_data)\none_data_reshaped = reshape_complex_vec(one_data) \n\nIQ_01_data = np.concatenate((zero_data_reshaped, one_data_reshaped))\nprint(IQ_01_data.shape) # verify IQ data shape",
"(2048, 2)\n"
],
[
"from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.model_selection import train_test_split",
"_____no_output_____"
],
[
"# construct vector w/ 0's and 1's (for testing)\nstate_01 = np.zeros(NUM_SHOTS) # shots gives number of experiments\nstate_01 = np.concatenate((state_01, np.ones(NUM_SHOTS)))\nprint(len(state_01))\n\n# Shuffle and split data into training and test sets\nIQ_01_train, IQ_01_test, state_01_train, state_01_test = train_test_split(IQ_01_data, state_01, test_size=0.5)",
"2048\n"
],
[
"# Set up the LDA\nLDA_01 = LinearDiscriminantAnalysis()\nLDA_01.fit(IQ_01_train, state_01_train)",
"_____no_output_____"
],
[
"# test on some simple data \nprint(LDA_01.predict([[0,0], [10, 0]]))",
"[0. 1.]\n"
],
[
"# Compute accuracy\nscore_01 = LDA_01.score(IQ_01_test, state_01_test)\nprint(score_01)",
"0.96484375\n"
],
[
"# Plot separatrix on top of scatter\ndef separatrixPlot(lda, x_min, x_max, y_min, y_max, shots):\n nx, ny = shots, shots\n\n xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),\n np.linspace(y_min, y_max, ny))\n Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])\n Z = Z[:, 1].reshape(xx.shape)\n\n plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='black')\n\nIQ_01_plot(x_min, x_max, y_min, y_max)\nseparatrixPlot(LDA_01, x_min, x_max, y_min, y_max, NUM_SHOTS)",
"_____no_output_____"
]
],
[
[
"## Discriminating the |0⟩, |1⟩ and |2⟩ states ",
"_____no_output_____"
]
],
[
[
"from qiskit.pulse.library import Waveform ## replaced by Wafeform after 0.25.0",
"_____no_output_____"
],
[
"def apply_sideband(pulse, freq):\n \"\"\"Apply a sinusoidal sideband to this pulse at frequency freq.\n Args:\n pulse (SamplePulse): The pulse of interest.\n freq (float): LO frequency for which we want to apply the sweep.\n Return:\n SamplePulse: Pulse with a sideband applied (oscillates at difference between freq and cal_qubit_freq).\n \"\"\"\n # time goes from 0 to dt*drive_samples, sine arg of form 2*pi*f*t\n t_samples = np.linspace(0, dt*drive_samples, drive_samples)\n sine_pulse = np.sin(2*np.pi*(freq-rough_qubit_frequency)*t_samples) # no amp for the sine\n \n # create sample pulse w/ sideband applied\n # Note: need to make sq_pulse.samples real, multiply elementwise\n sideband_pulse = Waveform(np.multiply(np.real(pulse.samples), sine_pulse), name='sideband_pulse')\n \n return sideband_pulse",
"_____no_output_____"
],
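[
"# Quick visual check (illustration only, not part of the calibration flow): the\n# sideband multiplies the gaussian envelope by a sine at the frequency\n# difference, shifting the pulse's effective drive frequency. The 300 MHz\n# offset below is an arbitrary value chosen just for plotting.\ndemo_pulse = apply_sideband(pi_pulse_01, rough_qubit_frequency - 300*MHz)\nplt.plot(np.real(pi_pulse_01.samples), label='gaussian envelope')\nplt.plot(np.real(demo_pulse.samples), label='sidebanded pulse')\nplt.xlabel('Sample index')\nplt.legend()\nplt.show()",
"_____no_output_____"
],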
[
"def create_excited_freq_sweep_program(freqs, drive_power):\n \"\"\"Builds a program that does a freq sweep by exciting the |1> state. \n This allows us to obtain the 1->2 frequency. We get from the |0> to |1>\n state via a pi pulse using the calibrated qubit frequency. To do the \n frequency sweep from |1> to |2>, we use a sideband method by tacking\n a sine factor onto the sweep drive pulse.\n Args:\n freqs (np.ndarray(dtype=float)): Numpy array of frequencies to sweep.\n drive_power (float) : Value of drive amplitude.\n Raises:\n ValueError: Thrown if use more than 75 frequencies; currently, an error will be thrown on the backend \n if you try more than 75 frequencies.\n Returns:\n Qobj: Program for freq sweep experiment.\n \"\"\"\n if len(freqs) > 75:\n raise ValueError(\"You can only run 75 schedules at a time.\")\n \n print(f\"The frequency sweep will go from {freqs[0] / GHz} GHz to {freqs[-1]/ GHz} GHz \\\nusing {len(freqs)} frequencies. The drive power is {drive_power}.\")\n\n base_12_pulse = pulse_lib.gaussian(duration=drive_samples,\n sigma=drive_sigma,\n amp=drive_power,\n name='base_12_pulse')\n schedules = []\n for jj, freq in enumerate(freqs):\n \n # add sideband to gaussian pulse\n freq_sweep_12_pulse = apply_sideband(base_12_pulse, freq)\n \n # add commands to schedule\n schedule = pulse.Schedule(name=\"Frequency = {}\".format(freq))\n\n # Add 0->1 pulse, freq sweep pulse and measure\n schedule |= pulse.Play(pi_pulse_01, drive_chan)\n schedule |= pulse.Play(freq_sweep_12_pulse, drive_chan) << schedule.duration \n schedule |= measure << schedule.duration # shift measurement to after drive pulses\n\n schedules.append(schedule)\n\n num_freqs = len(freqs)\n \n # draw a schedule\n display(schedules[-1].draw(channels=[drive_chan, meas_chan], label=True, scale=1.0))\n \n # assemble freq sweep program \n # Note: LO is at cal_qubit_freq for each schedule; accounted for by sideband\n excited_freq_sweep_program = assemble(schedules,\n backend=backend, \n meas_level=1,\n meas_return='avg',\n shots=NUM_SHOTS,\n schedule_los=[{drive_chan: rough_qubit_frequency}]\n * num_freqs)\n \n return excited_freq_sweep_program",
"_____no_output_____"
],
[
"# sweep 400 MHz below 0->1 frequency to catch the 1->2 frequency\nnum_freqs = 75\nexcited_sweep_freqs = rough_qubit_frequency + np.linspace(-400*MHz, 30*MHz, num_freqs)\nexcited_freq_sweep_program = create_excited_freq_sweep_program(excited_sweep_freqs, drive_power=0.3)\n\n# Plot an example schedule to make sure it's valid",
"The frequency sweep will go from 4.571800928086361 GHz to 5.001800928086361 GHz using 75 frequencies. The drive power is 0.3.\n"
],
[
"excited_freq_sweep_job = backend.run(excited_freq_sweep_program)",
"<ipython-input-93-05357ecdcc34>:1: DeprecationWarning: Passing a Qobj to Backend.run is deprecated and will be removed in a future release. Please pass in circuits or pulse schedules instead.\n excited_freq_sweep_job = backend.run(excited_freq_sweep_program)\n"
],
[
"print(excited_freq_sweep_job.job_id())\njob_monitor(excited_freq_sweep_job)",
"60727b63dbfb94257527a3b6\nJob Status: job has successfully run\n"
],
[
"# Get job data (avg)\nexcited_freq_sweep_data = get_job_data(excited_freq_sweep_job, average=True)",
"_____no_output_____"
],
[
"# Note: we are only plotting the real part of the signal\nplt.scatter(excited_sweep_freqs/GHz, excited_freq_sweep_data, color='black')\nplt.xlim([min(excited_sweep_freqs/GHz)+0.01, max(excited_sweep_freqs/GHz)]) # ignore min point (is off)\nplt.xlabel(\"Frequency [GHz]\", fontsize=15)\nplt.ylabel(\"Measured Signal [a.u.]\", fontsize=15)\nplt.title(\"1->2 Frequency Sweep (first pass)\", fontsize=15)\nplt.show()",
"/Users/mingtsoweii/miniconda3/lib/python3.8/site-packages/numpy/core/_asarray.py:136: ComplexWarning: Casting complex values to real discards the imaginary part\n return array(a, dtype, copy=False, order=order, subok=True)\n"
],
[
"from scipy.optimize import curve_fit\nfrom scipy.signal import find_peaks",
"_____no_output_____"
],
[
"# Prints out relative maxima frequencies in output_data; height gives lower bound (abs val)\ndef rel_maxima(freqs, output_data, height): \n \"\"\"\n Prints out relative maxima frequencies in output_data (can see peaks); height gives upper bound (abs val).\n Be sure to set the height properly or the peak will be ignored!\n Args:\n freqs (list): frequency list\n output_data (list): list of resulting signals\n height (float): upper bound (abs val) on a peak\n Returns:\n list: List containing relative maxima frequencies\n \"\"\"\n peaks, _ = find_peaks(output_data, height)\n print(\"Freq. dips: \", freqs[peaks])\n return freqs[peaks]",
"_____no_output_____"
],
[
"maxima = rel_maxima(excited_sweep_freqs, np.real(excited_freq_sweep_data), 18)\napprox_12_freq = maxima",
"Freq. dips: [4.62409823e+09]\n"
],
[
"# smaller range refined sweep\nnum_freqs = 75\nrefined_excited_sweep_freqs = approx_12_freq + np.linspace(-20*MHz, 20*MHz, num_freqs)\nrefined_excited_freq_sweep_program = create_excited_freq_sweep_program(refined_excited_sweep_freqs, drive_power=0.3)",
"The frequency sweep will go from 4.604098225383658 GHz to 4.644098225383658 GHz using 75 frequencies. The drive power is 0.3.\n"
],
[
"refined_excited_freq_sweep_job = backend.run(refined_excited_freq_sweep_program)",
"<ipython-input-104-5d96535366bb>:1: DeprecationWarning: Passing a Qobj to Backend.run is deprecated and will be removed in a future release. Please pass in circuits or pulse schedules instead.\n refined_excited_freq_sweep_job = backend.run(refined_excited_freq_sweep_program)\n"
],
[
"print(refined_excited_freq_sweep_job.job_id())\njob_monitor(refined_excited_freq_sweep_job)",
"60727e25dbfb944b1627a3c5\nJob Status: job has successfully run\n"
],
[
"# Get the refined data (average)\nrefined_excited_freq_sweep_data = get_job_data(refined_excited_freq_sweep_job, average=True)",
"_____no_output_____"
],
[
"# do fit in Hz\n(refined_excited_sweep_fit_params, \n refined_excited_sweep_y_fit) = fit_function(refined_excited_sweep_freqs,\n refined_excited_freq_sweep_data, \n lambda x, A, q_freq, B, C: (A / np.pi) * (B / ((x - q_freq)**2 + B**2)) + C,\n [-12, 4.624*GHz, 0.05*GHz, 3*GHz] # initial parameters for curve_fit\n )",
"/Users/mingtsoweii/miniconda3/lib/python3.8/site-packages/numpy/core/_asarray.py:83: ComplexWarning: Casting complex values to real discards the imaginary part\n return array(a, dtype, copy=False, order=order)\n"
],
[
"# Note: we are only plotting the real part of the signal\nplt.scatter(refined_excited_sweep_freqs/GHz, refined_excited_freq_sweep_data, color='black')\nplt.plot(refined_excited_sweep_freqs/GHz, refined_excited_sweep_y_fit, color='red')\nplt.xlim([min(refined_excited_sweep_freqs/GHz), max(refined_excited_sweep_freqs/GHz)])\nplt.xlabel(\"Frequency [GHz]\", fontsize=15)\nplt.ylabel(\"Measured Signal [a.u.]\", fontsize=15)\nplt.title(\"1->2 Frequency Sweep (refined pass)\", fontsize=15)\nplt.show()",
"/Users/mingtsoweii/miniconda3/lib/python3.8/site-packages/numpy/core/_asarray.py:136: ComplexWarning: Casting complex values to real discards the imaginary part\n return array(a, dtype, copy=False, order=order, subok=True)\n"
],
[
"_, qubit_12_freq, _, _ = refined_excited_sweep_fit_params\nprint(f\"Our updated estimate for the 1->2 transition frequency is \"\n f\"{round(qubit_12_freq/GHz, 7)} GHz.\")",
"Our updated estimate for the 1->2 transition frequency is 4.6237816 GHz.\n"
],
[
"# experimental configuration\nnum_rabi_points = 75 # number of experiments (ie amplitudes to sweep out)\n\n# Drive amplitude values to iterate over: 75 amplitudes evenly spaced from 0 to 1.0\ndrive_amp_min = 0\ndrive_amp_max = 1.0\ndrive_amps = np.linspace(drive_amp_min, drive_amp_max, num_rabi_points)",
"_____no_output_____"
],
[
"# Create schedule\nrabi_12_schedules = []\n\n# loop over all drive amplitudes\nfor ii, drive_amp in enumerate(drive_amps):\n \n base_12_pulse = pulse_lib.gaussian(duration=drive_samples,\n sigma=drive_sigma,\n amp=drive_amp,\n name='base_12_pulse')\n # apply sideband at the 1->2 frequency\n rabi_12_pulse = apply_sideband(base_12_pulse, qubit_12_freq)\n \n # add commands to schedule\n schedule = pulse.Schedule(name='Rabi Experiment at drive amp = %s' % drive_amp)\n schedule |= pulse.Play(pi_pulse_01, drive_chan) # 0->1\n schedule |= pulse.Play(rabi_12_pulse, drive_chan) << schedule.duration # 1->2 Rabi pulse\n schedule |= measure << schedule.duration # shift measurement to after drive pulse\n \n rabi_12_schedules.append(schedule)",
"_____no_output_____"
],
[
"# Assemble the schedules into a program\n# Note: The LO frequency is at cal_qubit_freq to support the 0->1 pi pulse;\n# it is modified for the 1->2 pulse using sidebanding\nrabi_12_expt_program = assemble(rabi_12_schedules,\n backend=backend,\n meas_level=1,\n meas_return='avg',\n shots=NUM_SHOTS,\n schedule_los=[{drive_chan: rough_qubit_frequencyqubit_frequencyqubit_frequencyqubit_frequencyqubit_frequency_qubit_frequency_qubit_frequency}]\n * num_rabi_points)",
"_____no_output_____"
],
[
"rabi_12_job = backend.run(rabi_12_expt_program)",
"<ipython-input-113-a52f9b93cb08>:1: DeprecationWarning: Passing a Qobj to Backend.run is deprecated and will be removed in a future release. Please pass in circuits or pulse schedules instead.\n rabi_12_job = backend.run(rabi_12_expt_program)\n"
],
[
"print(rabi_12_job.job_id())\njob_monitor(rabi_12_job)",
"6072815c8733253ed7e5ceca\nJob Status: job has successfully run\n"
],
[
"# Get the job data (average)\nrabi_12_data = get_job_data(rabi_12_job, average=True)",
"_____no_output_____"
],
[
"print(len(rabi_12_data)*0.6)",
"45.0\n"
],
[
"# Note: We only plot the real part of the signal.\nrabi_12_data = np.real(baseline_remove(rabi_12_data))\n(rabi_12_fit_params, \n rabi_12_y_fit) = fit_function(drive_amps[:45],\n rabi_12_data[:45], \n lambda x, A, B, drive_12_period, phi: (A*np.cos(2*np.pi*x/drive_12_period - phi) + B),\n [2, -2.5, 0.4,0.5])\n\nplt.scatter(drive_amps, rabi_12_data, color='black')\nplt.plot(drive_amps[:45], rabi_12_y_fit, color='red')\n\ndrive_12_period = rabi_12_fit_params[2]\n# account for phi in computing pi amp\npi_amp_12 = (drive_12_period/2/np.pi) *(np.pi+rabi_12_fit_params[3])\n\nplt.axvline(pi_amp_12, color='red', linestyle='--')\nplt.axvline(pi_amp_12+drive_12_period/2, color='red', linestyle='--')\nplt.annotate(\"\", xy=(pi_amp_12+drive_12_period/2, 0), xytext=(pi_amp_12,0), arrowprops=dict(arrowstyle=\"<->\", color='red'))\nplt.annotate(\"$\\pi$\", xy=(pi_amp_12-0.03, 0.1), color='red')\n\nplt.xlabel(\"Drive amp [a.u.]\", fontsize=15)\nplt.ylabel(\"Measured signal [a.u.]\", fontsize=15)\nplt.title('Rabi Experiment (1->2)', fontsize=20)\nplt.show()",
"_____no_output_____"
],
[
"print(f\"Our updated estimate for the 1->2 transition frequency is \"\n f\"{round(qubit_12_freq/GHz, 7)} GHz.\")\nprint(f\"Pi Amplitude (1->2) = {pi_amp_12}\")",
"Our updated estimate for the 1->2 transition frequency is 4.6237816 GHz.\nPi Amplitude (1->2) = 0.2493075855271005\n"
],
[
"pi_pulse_12 = pulse_lib.gaussian(duration=drive_samples,\n amp=pi_amp_12, \n sigma=drive_sigma,\n name='pi_pulse_12')\n# make sure this pulse is sidebanded\npi_pulse_12 = apply_sideband(pi_pulse_12, qubit_12_freq)",
"_____no_output_____"
],
[
"# Create the three schedules\n\n# Ground state schedule\nzero_schedule = pulse.Schedule(name=\"zero schedule\")\nzero_schedule |= measure\n\n# Excited state schedule\none_schedule = pulse.Schedule(name=\"one schedule\")\none_schedule |= pulse.Play(pi_pulse_01, drive_chan)\none_schedule |= measure << one_schedule.duration\n\n# Excited state schedule\ntwo_schedule = pulse.Schedule(name=\"two schedule\")\ntwo_schedule |= pulse.Play(pi_pulse_01, drive_chan)\ntwo_schedule |= pulse.Play(pi_pulse_12, drive_chan) << two_schedule.duration\ntwo_schedule |= measure << two_schedule.duration",
"_____no_output_____"
],
[
"# Assemble the schedules into a program\nIQ_012_program = assemble([zero_schedule, one_schedule, two_schedule],\n backend=backend,\n meas_level=1,\n meas_return='single',\n shots=NUM_SHOTS,\n schedule_los=[{drive_chan: rough_qubit_frequency}] * 3)",
"_____no_output_____"
],
[
"IQ_012_job = backend.run(IQ_012_program)",
"<ipython-input-196-c6097db95b4a>:1: DeprecationWarning: Passing a Qobj to Backend.run is deprecated and will be removed in a future release. Please pass in circuits or pulse schedules instead.\n IQ_012_job = backend.run(IQ_012_program)\n"
],
[
"print(IQ_012_job.job_id())\njob_monitor(IQ_012_job)",
"6072864b01ba967899179e41\nJob Status: job has successfully run\n"
],
[
"# Get job data (single); split for zero, one and two\nIQ_012_data = get_job_data(IQ_012_job, average=False)\nzero_data = IQ_012_data[0]\none_data = IQ_012_data[1]\ntwo_data = IQ_012_data[2]",
"_____no_output_____"
],
[
"def IQ_012_plot(x_min, x_max, y_min, y_max):\n \"\"\"Helper function for plotting IQ plane for 0, 1, 2. Limits of plot given\n as arguments.\"\"\"\n # zero data plotted in blue\n plt.scatter(np.real(zero_data), np.imag(zero_data), \n s=5, cmap='viridis', c='blue', alpha=0.5, label=r'$|0\\rangle$')\n # one data plotted in red\n plt.scatter(np.real(one_data), np.imag(one_data), \n s=5, cmap='viridis', c='red', alpha=0.5, label=r'$|1\\rangle$')\n # two data plotted in green\n plt.scatter(np.real(two_data), np.imag(two_data), \n s=5, cmap='viridis', c='green', alpha=0.5, label=r'$|2\\rangle$')\n\n # Plot a large dot for the average result of the 0, 1 and 2 states.\n mean_zero = np.mean(zero_data) # takes mean of both real and imaginary parts\n mean_one = np.mean(one_data)\n mean_two = np.mean(two_data)\n plt.scatter(np.real(mean_zero), np.imag(mean_zero), \n s=200, cmap='viridis', c='black',alpha=1.0)\n plt.scatter(np.real(mean_one), np.imag(mean_one), \n s=200, cmap='viridis', c='black',alpha=1.0)\n plt.scatter(np.real(mean_two), np.imag(mean_two), \n s=200, cmap='viridis', c='black',alpha=1.0)\n \n plt.xlim(x_min, x_max)\n plt.ylim(y_min,y_max)\n plt.legend()\n plt.ylabel('I [a.u.]', fontsize=15)\n plt.xlabel('Q [a.u.]', fontsize=15)\n plt.title(\"0-1-2 discrimination\", fontsize=15)",
"_____no_output_____"
],
[
"x_min = -10\nx_max = 20\ny_min = 10\ny_max = 35\nIQ_012_plot(x_min, x_max, y_min, y_max)",
"_____no_output_____"
],
[
"# Create IQ vector (split real, imag parts)\nzero_data_reshaped = reshape_complex_vec(zero_data)\none_data_reshaped = reshape_complex_vec(one_data) \ntwo_data_reshaped = reshape_complex_vec(two_data) \n\nIQ_012_data = np.concatenate((zero_data_reshaped, one_data_reshaped, two_data_reshaped))\nprint(IQ_012_data.shape) # verify IQ data shape",
"(3072, 2)\n"
],
[
"# construct vector w/ 0's, 1's and 2's (for testing)\nstate_012 = np.zeros(NUM_SHOTS) # shots gives number of experiments\nstate_012 = np.concatenate((state_012, np.ones(NUM_SHOTS)))\nstate_012 = np.concatenate((state_012, 2*np.ones(NUM_SHOTS)))\nprint(len(state_012))\n\n# Shuffle and split data into training and test sets\nIQ_012_train, IQ_012_test, state_012_train, state_012_test = train_test_split(IQ_012_data, state_012, test_size=0.5)\n# Set up the LDA\nLDA_012 = LinearDiscriminantAnalysis()\nLDA_012.fit(IQ_012_train, state_012_train)\n# test on some simple data \nprint(LDA_012.predict([[0, 0], [-10, 0], [-15, -5]]))\n# Compute accuracy\nscore_012 = LDA_012.score(IQ_012_test, state_012_test)\nprint(score_012)\nIQ_012_plot(x_min, x_max, y_min, y_max)\nseparatrixPlot(LDA_012, x_min, x_max, y_min, y_max, NUM_SHOTS)",
"3072\n[1. 1. 1.]\n0.8079427083333334\n"
],
[
"from sklearn.neural_network import MLPClassifier\nIQ_012_plot(x_min, x_max, y_min, y_max)\nNN_012 = MLPClassifier(solver='lbfgs',max_iter=1000)\nNN_012.fit(IQ_012_train, state_012_train)\nseparatrixPlot(NN_012, x_min, x_max, y_min, y_max, NUM_SHOTS)\nprint(NN_012.score(IQ_012_test, state_012_test))",
"/Users/mingtsoweii/miniconda3/lib/python3.8/site-packages/sklearn/neural_network/_multilayer_perceptron.py:500: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\n self.n_iter_ = _check_optimize_result(\"lbfgs\", opt_res, self.max_iter)\n"
],
[
"from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\nIQ_012_plot(x_min, x_max, y_min, y_max)\nQDA_012 = QuadraticDiscriminantAnalysis()\nQDA_012.fit(IQ_012_train, state_012_train)\nscoreq_012 = QDA_012.score(IQ_012_test, state_012_test)\nprint(scoreq_012)",
"0.8255208333333334\n"
],
[
"from sklearn.neural_network import MLPClassifier\nIQ_012_plot(x_min, x_max, y_min, y_max)\nNN_012 = MLPClassifier(hidden_layer_sizes = (100,100,),solver='lbfgs',max_iter=1000)\nNN_012.fit(IQ_012_train, state_012_train)\nseparatrixPlot(NN_012, x_min, x_max, y_min, y_max, NUM_SHOTS)\nprint(NN_012.score(IQ_012_test, state_012_test))",
"/Users/mingtsoweii/miniconda3/lib/python3.8/site-packages/sklearn/neural_network/_multilayer_perceptron.py:500: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\n self.n_iter_ = _check_optimize_result(\"lbfgs\", opt_res, self.max_iter)\n"
]
],
[
[
"### Qudit Gates Class\n\nCurrently we only have a few gate operations supported: xcyc (cyclic shift), axcyc (acyclic shift), x01, x12,x02, h01, h12, and h02 gates.",
"_____no_output_____"
]
],
[
[
"from qiskit.pulse import *\nfrom qiskit.pulse import library as pulse_lib\nclass QuditGates:\n def __init__(self, pi_amp_01, pi_amp_12, trans_freq_01, trans_freq_12, chan, dt):\n# self.sched = sched\n self.dt = dt\n self.pi_amp_01 = pi_amp_01\n self.pi_amp_12 = pi_amp_12\n self.chan = chan\n self.trans_freq_01 = trans_freq_01\n self.trans_freq_12 = trans_freq_12\n drive_sigma = 0.075*1e-6\n self.drive_sigma_dt = int(drive_sigma/dt) - (int(drive_sigma/dt%16))\n self.drive_samples_dt = 8*self.drive_sigma_dt\n self.pi_pul_01 = pulse_lib.gaussian(duration=self.drive_samples_dt,\n amp=self.pi_amp_01,\n sigma=self.drive_sigma_dt,\n )\n self.base_pulse = pulse_lib.gaussian(duration=self.drive_samples_dt,\n sigma=self.drive_sigma_dt,\n amp=self.pi_amp_12)\n# pi_pulse_12 = apply_sideband_n(base_pulse, trans_freq_12)\n \n def apply_sideband_n(self, base_pulse, freq):\n t_samples = np.linspace(0, dt*self.drive_samples_dt, num=self.drive_samples_dt)\n sine_pulse = np.sin(2*np.pi*(freq-self.trans_freq_01)*t_samples)\n sideband_pulse = Waveform(np.multiply(np.real(base_pulse.samples), sine_pulse), name='sideband_pulse')\n return sideband_pulse\n \n def xcyc(self):\n '''This the gate corresponding to the operator\n |n> --> |(n+1) mod 3>.'''\n pi_pul_12 = self.apply_sideband_n(self.base_pulse, trans_freq_12)\n with build() as xcyc_pulse:\n play(pi_pul_12, self.chan)\n play(self.pi_pul_01, self.chan)\n sched = Schedule()\n sched += xcyc_pulse\n return sched\n \n def xacyc(self):\n '''This the gate corresponding to the operator\n |n> --> |(n-1) mod 3>.'''\n pi_pul_12 = self.apply_sideband_n(self.base_pulse, trans_freq_12)\n with build as xacyc_pulse:\n play(self.pi_pul_01, self.chan)\n play(pi_pul_12, self.chan)\n sched = Schedule()\n sched += xacyc_pulse\n return sched\n def x01(self, chan):\n '''This the gate corresponding to the operator\n |0> --> |1> and |1> --> |0>.'''\n with pulse.build as x01_pulse:\n pulse.play(self.pi_pul_01, chan)\n sched = Schedule()\n return sched += x01_pulse\n\n def x12(self, chan):\n '''This the gate corresponding to the operator\n |1> --> |2> and |2> --> |1>.'''\n with pulse.build as x12_pulse:\n pulse.play(self.pi_pul_12, chan)\n sched = Schedule()\n return sched += x12_pulse\n\n def x02(self, chan):\n '''This the gate corresponding to the operator\n |0> --> |2> and |2> --> |0>.'''\n with pulse.build as x02_pulse:\n pulse.play(self.pi_pul_01, chan)\n pulse.play(self.pi_pul_12, chan)\n pulse.play(self.pi_pul_01, chan)\n sched = Schedule()\n return sched += x02_pulse\n def h01(self):\n qc = QuantumCircuit(1)\n qc.h(0)\n with build(self.backend) as h01_pulse:\n call(qc)\n sched = Schedule()\n sched += h01_pulse\n return sched\n\n def h12(self):\n qc = QuantumCircuit(1)\n qc.h(0)\n with build(self.backend) as h12_pulse:\n call(qc)\n sched = Schedule()\n sched += h12_pulse\n return sched\n\n def h02(self):\n qc = QuantumCircuit(1)\n qc.h(0)\n with build(self.backend) as h02_pulse:\n call(qc)\n sched = Schedule()\n sched += Play(self.pi_pul_01,self.chan)\n sched += h02_pulse\n sched += Play(self.pi_pul_01,self.chan)\n return sched",
"_____no_output_____"
],
[
"from qiskit.pulse import Schedule\n\ntrans_freq_01=4971800930\ntrans_freq_12=4623781600\n\npi_amp_01=0.14096173612665405\npi_amp_12=0.2493075855271005\ngate=QuditGates(pi_amp_01, pi_amp_12, trans_freq_01, trans_freq_12, drive_chan, dt)\ncyclic_shift_sched = Schedule()\ncyclic_shift_sched += gate.xcyc()\ninst_sched_map = backend_defaults.instruction_schedule_map\nmeasure = inst_sched_map.get('measure', qubits=backend_config.meas_map[meas_map_idx])\ncyclic_shift_sched |= measure << cyclic_shift_sched.duration",
"_____no_output_____"
],
[
"cyclic_shift_program = assemble(cyclic_shift_sched,\n backend=backend,\n meas_level=1,\n meas_return='single',\n shots=NUM_SHOTS,\n schedule_los=[{drive_chan: rough_qubit_frequency}])",
"_____no_output_____"
],
[
"cyclic_shift_sched.draw(label=True)",
"/Users/mingtsoweii/miniconda3/lib/python3.8/site-packages/qiskit/pulse/schedule.py:1534: DeprecationWarning: Legacy pulse drawer is deprecated. Specified arguments label are deprecated. Please check the API document of new pulse drawer `qiskit.visualization.pulse_drawer_v2`.\n warnings.warn('Legacy pulse drawer is deprecated. '\n"
],
[
"cyclic_shift_job = backend.run(cyclic_shift_program)",
"<ipython-input-366-cf96e0c6d087>:1: DeprecationWarning: Passing a Qobj to Backend.run is deprecated and will be removed in a future release. Please pass in circuits or pulse schedules instead.\n cyclic_shift_job = backend.run(cyclic_shift_program)\n"
],
[
"job_monitor(cyclic_shift_job)",
"Job Status: job has successfully run\n"
],
[
"cyclic_shift_data = get_job_data(cyclic_shift_job, average=False)\ncyclic_shift_data",
"_____no_output_____"
],
[
"cyclic_shift_reshaped=reshape_complex_vec(cyclic_shift_data[0])",
"_____no_output_____"
],
[
"def IQ_012_measure_plot(x_min, x_max, y_min, y_max):\n \"\"\"Helper function for plotting IQ plane for 0, 1, 2. Limits of plot given\n as arguments.\"\"\"\n # measured data plotted in blue\n plt.scatter(np.real(cyclic_shift_data), np.imag(cyclic_shift_data), \n s=5, cmap='viridis', c='blue', alpha=0.5, label=r'$|0\\rangle$')\n mean_data = np.mean(cyclic_shift_data) # takes mean of both real and imaginary parts\n plt.scatter(np.real(mean_data), np.imag(mean_data), \n s=200, cmap='viridis', c='black',alpha=1.0)\n \n plt.xlim(x_min, x_max)\n plt.ylim(y_min,y_max)\n plt.legend()\n plt.ylabel('I [a.u.]', fontsize=15)\n plt.xlabel('Q [a.u.]', fontsize=15)\n plt.title(\"0-1-2 discrimination of measured data\", fontsize=15)",
"_____no_output_____"
],
[
"x_min = -30\nx_max = 30\ny_min = -60\ny_max = 30\nIQ_012_measure_plot(x_min, x_max, y_min, y_max)\noutput=NN_012.predict(cyclic_shift_reshaped)\nseparatrixPlot(NN_012, x_min, x_max, y_min, y_max, NUM_SHOTS)",
"_____no_output_____"
],
[
"print(output)",
"[2. 2. 2. ... 2. 2. 2.]\n"
],
[
"unique, counts = np.unique(output, return_counts=True)\ndict(zip(unique, counts))\n",
"_____no_output_____"
]
],
[
[
"The discriminator seems to be away from the measure data of the cyclic shift. This problem needs to be solved in the future.",
"_____no_output_____"
],
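One possible direction for investigating this offset (a hedged sketch, not part of the original analysis): recenter the measured IQ points on the centroid of the calibration data before classifying. Here `train_IQ` is a hypothetical (n, 2) array of the calibration shots that `NN_012` was fitted on; `cyclic_shift_reshaped` comes from the cells above.

```python
import numpy as np

# Hypothetical recentering: shift the measured cloud onto the calibration centroid.
shift = np.mean(train_IQ, axis=0) - np.mean(cyclic_shift_reshaped, axis=0)
recentered = cyclic_shift_reshaped + shift
print(np.unique(NN_012.predict(recentered), return_counts=True))
```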
[
"### Compare to LDA discriminator",
"_____no_output_____"
]
],
[
[
"x_min = -30\nx_max = 30\ny_min = -60\ny_max = 30\nIQ_012_measure_plot(x_min, x_max, y_min, y_max)\noutput_LDA=LDA_012.predict(cyclic_shift_reshaped)\nseparatrixPlot(LDA_012, x_min, x_max, y_min, y_max, NUM_SHOTS)",
"_____no_output_____"
],
[
"unique, counts = np.unique(output_LDA, return_counts=True)\ndict(zip(unique, counts))",
"_____no_output_____"
]
],
[
[
"It has the same issue of classification of the measured data.",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
4a1a4850654c17aefd87f31c3ec0319836b8762c
| 17,721 |
ipynb
|
Jupyter Notebook
|
CommaPlacementNotebooks/.ipynb_checkpoints/2.OPT_Attention_Decoder-checkpoint.ipynb
|
poulsvane/Deep-Learning
|
5baead95162a176c002b1e5fe3f8bdbf38753fa9
|
[
"Apache-2.0"
] | null | null | null |
CommaPlacementNotebooks/.ipynb_checkpoints/2.OPT_Attention_Decoder-checkpoint.ipynb
|
poulsvane/Deep-Learning
|
5baead95162a176c002b1e5fe3f8bdbf38753fa9
|
[
"Apache-2.0"
] | null | null | null |
CommaPlacementNotebooks/.ipynb_checkpoints/2.OPT_Attention_Decoder-checkpoint.ipynb
|
poulsvane/Deep-Learning
|
5baead95162a176c002b1e5fe3f8bdbf38753fa9
|
[
"Apache-2.0"
] | null | null | null | 38.274298 | 151 | 0.601772 |
[
[
[
"from __future__ import absolute_import, division, print_function \n\n%matplotlib inline \n# %matplotlib nbagg\nimport tensorflow as tf\nimport matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom IPython import display\nfrom data_generator_tensorflow import get_batch, print_valid_characters\n\nimport os\nimport sys\nsys.path.append(os.path.join('.', '..')) \nimport utils \n\nimport tf_utils",
"_____no_output_____"
]
],
[
[
"# Attention Decoder\n> <span style=\"color:gray\">\nOriginal [Theano/Lasagne tutorial](https://github.com/DeepLearningDTU/nvidia_deep_learning_summercamp_2016/) by \nLars Maaløe ([larsmaaloee](https://github.com/larsmaaloee)),\nSøren Kaae Sønderby ([skaae](https://github.com/skaae)), and \nCasper Sønderby ([casperkaae](https://github.com/casperkaae)). \nConverted to TensorFlow by \nAlexander R. Johansen ([alrojo](https://github.com/alrojo)), \nand updated by \nToke Faurby ([faur](https://github.com/Faur)).\n> </span>\n\nSoft attention for recurrent neural networks have recently attracted a lot of interest.\nThese methods let the Decoder model selective focus on which part of the encoder sequence it will use for each decoded output symbol.\nThis relieves the encoder from having to compress the input sequence into a fixed size vector representation passed on to the decoder.\nSecondly we can interrogate the decoder network about where it attends while producing the ouputs.\nbelow we'll implement an decoder with selective attention and show that it significantly improves the performance of the toy translation task.\n\nThe seminal attention paper is https://arxiv.org/pdf/1409.0473v7.pdf\n\nThe principle of attention models is:\n\n1. Use the encoder to get the hidden represention $\\{h^1_e, ...h^n_e\\}$ for each position in the input sequence.\n2. For timestep $t$ in the decoder do for $m = 1...n$ : $a_{mt} = f(h^e_m, h^d_t)$. Where f is a function returning a scalar value.\n4. Weight each $h^e_m$ by its probability $p_{mt}$ and sum to get $h_{in}$.\n5. Use $h_{in}$ as an additional input to the decoder. $h_{in}$ is recalculated each time the decoder is updated.",
"_____no_output_____"
]
],
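A minimal NumPy sketch of steps 1-5 above, assuming a simple dot-product score for $f$; the shapes are illustrative only.

```python
import numpy as np

# Shapes are illustrative: n encoder positions, hidden size d.
n, d = 7, 16
h_enc = np.random.randn(n, d)        # step 1: encoder states h^e_1..h^e_n
h_dec_t = np.random.randn(d)         # decoder state h^d_t at one time step

scores = h_enc @ h_dec_t             # step 2: a_mt = f(h^e_m, h^d_t), dot-product f
p = np.exp(scores - scores.max())    # step 3: softmax over the scores
p /= p.sum()
h_in = p @ h_enc                     # step 4: probability-weighted sum of encoder states
# step 5: h_in is fed to the decoder as an extra input and recomputed every step
```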
[
[
"# At the bottom of the script there is some code which saves the model.\n# If you wish to restore your model from a previous state use this function.\nload_model = False",
"_____no_output_____"
],
[
"# resetting the graph\ntf.reset_default_graph()\n\n# Setting up hyperparameters and general configs\nMAX_DIGITS = 10\nMIN_DIGITS = 5\nNUM_INPUTS = 27\nNUM_OUTPUTS = 11 #(0-9 + '#')\n\nBATCH_SIZE = 16\n# try various learning rates 1e-2 to 1e-5\nLEARNING_RATE = 0.005\nX_EMBEDDINGS = 8\nt_EMBEDDINGS = 8\nNUM_UNITS_ENC = 16\nNUM_UNITS_DEC = 16\nNUM_UNITS_ATTN = 16\n\n\n# Setting up placeholders, these are the tensors that we \"feed\" to our network\nXs = tf.placeholder(tf.int32, shape=[None, None], name='X_input')\nts_in = tf.placeholder(tf.int32, shape=[None, None], name='t_input_in')\nts_out = tf.placeholder(tf.int32, shape=[None, None], name='t_input_out')\nX_len = tf.placeholder(tf.int32, shape=[None], name='X_len')\nt_len = tf.placeholder(tf.int32, shape=[None], name='X_len')\nt_mask = tf.placeholder(tf.float32, shape=[None, None], name='t_mask')\n\n# Building the model\n\n# first we build the embeddings to make our characters into dense, trainable vectors\nX_embeddings = tf.get_variable('X_embeddings', [NUM_INPUTS, X_EMBEDDINGS],\n initializer=tf.random_normal_initializer(stddev=0.1))\nt_embeddings = tf.get_variable('t_embeddings', [NUM_OUTPUTS, t_EMBEDDINGS],\n initializer=tf.random_normal_initializer(stddev=0.1))\n\n# setting up weights for computing the final output\nW_out = tf.get_variable('W_out', [NUM_UNITS_DEC, NUM_OUTPUTS])\nb_out = tf.get_variable('b_out', [NUM_OUTPUTS])\n\nX_embedded = tf.gather(X_embeddings, Xs, name='embed_X')\nt_embedded = tf.gather(t_embeddings, ts_in, name='embed_t')\n\n# forward encoding\nenc_cell = tf.nn.rnn_cell.GRUCell(NUM_UNITS_ENC)#python.ops.rnn_cell.GRUCell\nenc_out, enc_state = tf.nn.dynamic_rnn(cell=enc_cell, inputs=X_embedded,\n sequence_length=X_len, dtype=tf.float32)\n# use below in case TF's does not work as intended\n#enc_state, _ = tf_utils.encoder(X_embedded, X_len, 'encoder', NUM_UNITS_ENC)\n#\n#enc_state = tf.concat(1, [enc_state, enc_state])\n\n# decoding\n# note that we are using a wrapper for decoding here, this wrapper is hardcoded to only use GRU\n# check out tf_utils to see how you make your own decoder\ndec_out, dec_out_valid, alpha_valid = \\\n tf_utils.attention_decoder(enc_out, X_len, enc_state, t_embedded, t_len,\n NUM_UNITS_DEC, NUM_UNITS_ATTN, t_embeddings,\n W_out, b_out)\n\n# reshaping to have [batch_size*seqlen, num_units]\nout_tensor = tf.reshape(dec_out, [-1, NUM_UNITS_DEC])\nout_tensor_valid = tf.reshape(dec_out_valid, [-1, NUM_UNITS_DEC])\n\n# computing output\nout_tensor = tf.matmul(out_tensor, W_out) + b_out\nout_tensor_valid = tf.matmul(out_tensor_valid, W_out) + b_out\n\n# reshaping back to sequence\nb_size = tf.shape(X_len)[0] # use a variable we know has batch_size in [0]\nseq_len = tf.shape(t_embedded)[1] # variable we know has sequence length in [1]\nnum_out = tf.constant(NUM_OUTPUTS) # casting NUM_OUTPUTS to a tensor variable\n\nout_shape = tf.concat([tf.expand_dims(b_size, 0),\n tf.expand_dims(seq_len, 0),\n tf.expand_dims(num_out, 0)],\n axis=0)\nout_tensor = tf.reshape(out_tensor, out_shape)\nout_tensor_valid = tf.reshape(out_tensor_valid, out_shape)\n\n## handling shape loss\ny = out_tensor\ny_valid = out_tensor_valid",
"_____no_output_____"
]
],
[
[
"### Defining the cost function, gradient clipping and accuracy",
"_____no_output_____"
]
],
[
[
"def loss_and_acc(preds):\n # sequence_loss_tensor is a modification of TensorFlow's own sequence_to_sequence_loss\n # TensorFlow's seq2seq loss works with a 2D list instead of a 3D tensors\n loss = tf_utils.sequence_loss_tensor(preds, ts_out, t_mask, NUM_OUTPUTS) # notice that we use ts_out here!\n # if you want regularization\n reg_scale = 0.00001\n regularize = tf.contrib.layers.l2_regularizer(reg_scale)\n params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n reg_term = sum([regularize(param) for param in params])\n loss += reg_term\n # calculate accuracy\n argmax = tf.to_int32(tf.argmax(preds, 2))\n correct = tf.to_float(tf.equal(argmax, ts_out)) * t_mask\n accuracy = tf.reduce_sum(correct) / tf.reduce_sum(t_mask)\n return loss, accuracy, argmax\n\nloss, accuracy, predictions = loss_and_acc(y)\nloss_valid, accuracy_valid, predictions_valid = loss_and_acc(y_valid)\n\n# use lobal step to keep track of our iterations\nglobal_step = tf.Variable(0, name='global_step', trainable=False)\n\n# pick optimizer, try momentum or adadelta\noptimizer = tf.train.AdamOptimizer(LEARNING_RATE)\n\n# extract gradients for each variable\ngrads_and_vars = optimizer.compute_gradients(loss)\n\n## add below for clipping by norm\n#gradients, variables = zip(*grads_and_vars) # unzip list of tuples\n#clipped_gradients, global_norm = (\n# tf.clip_by_global_norm(gradients, self.clip_norm) )\n#grads_and_vars = zip(clipped_gradients, variables)\n\n## apply gradients and make trainable function\ntrain_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)",
"_____no_output_____"
]
],
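For assignment 6 further below, a hedged sketch of what enabling the commented-out clipping could look like, replacing the last block of the cell above. `CLIP_NORM` is an illustrative constant; `loss`, `LEARNING_RATE`, and `global_step` are assumed from the cell above.

```python
import tensorflow as tf

CLIP_NORM = 1.0  # illustrative value; tune as needed
optimizer = tf.train.AdamOptimizer(LEARNING_RATE)
# unzip (gradient, variable) pairs, clip the gradients by global norm, re-zip
gradients, variables = zip(*optimizer.compute_gradients(loss))
clipped, _ = tf.clip_by_global_norm(gradients, CLIP_NORM)
train_op = optimizer.apply_gradients(zip(clipped, variables),
                                     global_step=global_step)
```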
[
[
"### Testing the forward pass",
"_____no_output_____"
]
],
[
[
"# print all the variable names and shapes\n# notice that W_z is now packed, such that it contains both W_z_h and W_x_h, this is for optimization\n# further, we now have W_s, b_s. This is so NUM_UNITS_ENC and NUM_UNITS_DEC does not have to share shape ..!\nfor var in tf.global_variables ():\n s = var.name + \" \"*(40-len(var.name))\n print (s, var.value().get_shape())",
"_____no_output_____"
],
[
"#Generate some validation data\nX_val, X_len_val, t_in_val, t_out_val, t_len_val, t_mask_val, \\\ntext_inputs_val, text_targets_in_val, text_targets_out_val = \\\n get_batch(batch_size=5000, max_digits=MAX_DIGITS,min_digits=MIN_DIGITS)\nprint(\"X_val\", X_val.shape)\nprint(\"t_out_val\", t_out_val.shape)",
"_____no_output_____"
],
[
"## Start the session\n# restricting memory usage, TensorFlow is greedy and will use all memory otherwise\ngpu_opts = tf.GPUOptions(per_process_gpu_memory_fraction=0.35)\n# initialize the Session\nsess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_opts))\n# test train part\nsess.run(tf.global_variables_initializer())",
"_____no_output_____"
],
[
"%%time \n## If you get an error, remove this line! It makes the error message hard to understand.\n\n# NOTICE - THIS MIGHT TAKE UPTO 30 MINUTES ON CPU..!\n# setting up running parameters\nval_interval = 5000\nsamples_to_process = 2e5\nsamples_processed = 0\nsamples_val = []\ncosts, accs = [], []\nplt.figure()\ntry:\n while samples_processed < samples_to_process:\n # load data\n X_tr, X_len_tr, t_in_tr, t_out_tr, t_len_tr, t_mask_tr, \\\n text_inputs_tr, text_targets_in_tr, text_targets_out_tr = \\\n get_batch(batch_size=BATCH_SIZE,max_digits=MAX_DIGITS,min_digits=MIN_DIGITS)\n # make fetches\n fetches_tr = [train_op, loss, accuracy]\n # set up feed dict\n feed_dict_tr = {Xs: X_tr, X_len: X_len_tr, ts_in: t_in_tr,\n ts_out: t_out_tr, t_len: t_len_tr, t_mask: t_mask_tr}\n # run the model\n res = tuple(sess.run(fetches=fetches_tr, feed_dict=feed_dict_tr))\n _, batch_cost, batch_acc = res\n costs += [batch_cost]\n samples_processed += BATCH_SIZE\n #if samples_processed % 1000 == 0: print batch_cost, batch_acc\n #validation data\n if samples_processed % val_interval == 0:\n #print \"validating\"\n fetches_val = [accuracy_valid, y_valid, alpha_valid]\n feed_dict_val = {Xs: X_val, X_len: X_len_val, ts_in: t_in_val,\n ts_out: t_out_val, t_len: t_len_val, t_mask: t_mask_val}\n res = tuple(sess.run(fetches=fetches_val, feed_dict=feed_dict_val))\n acc_val, output_val, alp_val = res\n samples_val += [samples_processed]\n accs += [acc_val]\n plt.plot(samples_val, accs, 'b-')\n plt.ylabel('Validation Accuracy', fontsize=15)\n plt.xlabel('Processed samples', fontsize=15)\n plt.title('', fontsize=20)\n plt.grid('on')\n plt.savefig(\"out_attention.png\")\n display.display(display.Image(filename=\"out_attention.png\"))\n display.clear_output(wait=True)\n# NOTICE - THIS MIGHT TAKE UPTO 30 MINUTES ON CPU..!\nexcept KeyboardInterrupt:\n pass",
"_____no_output_____"
],
[
"#plot of validation accuracy for each target position\nplt.figure(figsize=(7,7))\nplt.plot(np.mean(np.argmax(output_val,axis=2)==t_out_val,axis=0))\nplt.ylabel('Accuracy', fontsize=15)\nplt.xlabel('Target position', fontsize=15)\n#plt.title('', fontsize=20)\nplt.grid('on')\nplt.show()\n#why do the plot look like this?",
"_____no_output_____"
],
[
"### attention plot, try with different i = 1, 2, ..., 1000\ni = 42\n\ncolumn_labels = map(str, list(t_out_val[i]))\nrow_labels = map(str, (list(X_val[i])))\ndata = alp_val[i]\nfig, ax = plt.subplots()\nheatmap = ax.pcolor(data, cmap=plt.cm.Blues)\n\n# put the major ticks at the middle of each cell\nax.set_xticks(np.arange(data.shape[1])+0.5, minor=False)\nax.set_yticks(np.arange(data.shape[0])+0.5, minor=False)\n\n# want a more natural, table-like display\nax.invert_yaxis()\nax.xaxis.tick_top()\n\nax.set_xticklabels(row_labels, minor=False)\nax.set_yticklabels(column_labels, minor=False)\n\nplt.ylabel('output', fontsize=15)\nplt.xlabel('Attention plot', fontsize=15)\n\nplt.show()",
"_____no_output_____"
],
[
"# Plot of average attention weight as a function of the sequence position for each of \n# the 21 targets in the output sequence i.e. each line is the mean postion of the \n# attention for each target position.\n\nnp.mean(alp_val, axis=0).shape\nplt.figure()\nplt.plot(np.mean(alp_val, axis=0).T)\nplt.ylabel('alpha', fontsize=15)\nplt.xlabel('Input Sequence position', fontsize=15)\nplt.title('Alpha weights', fontsize=20)\nplt.legend(map(str,range(1,22)), bbox_to_anchor=(1.125,1.0), fontsize=10)\nplt.show()\n",
"_____no_output_____"
],
[
"## Save model\n# Read more about saving and loading models at https://www.tensorflow.org/programmers_guide/saved_model\n\n# Save model\nsave_path = tf.train.Saver().save(sess, \"/tmp/model.ckpt\")\nprint(\"Model saved in file: %s\" % save_path)\n",
"_____no_output_____"
],
[
"sess.close()",
"_____no_output_____"
]
],
[
[
"## Assignments for the attention decoder\n1. Explain what the attention plot shows.\n2. Explain what the alpha weights plot shows.\n 3. Why are the alpha curve for the first digit narrow and peaked while later digits have alpha curves that are wider and less peaked?\n4. Why is attention a good idea for this problem? Can you think of other problems where attention is a good choice?\n 1. Compare the performance and training time (number of samples processed) for the models with and without attention.\n5. Try setting MIN_DIGITS and MAX_DIGITS to 20\n6. Enable gradient clipping (under the loss codeblock)",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
4a1a4a29e6d0e1a4314125fbf0f753ffff3e8ec5
| 11,706 |
ipynb
|
Jupyter Notebook
|
python_Basics.ipynb
|
NoelJoseRoach/Business-Analytics-with-Python
|
20d5e553be1fc43be9b37953a237fd0eaa949081
|
[
"Apache-2.0"
] | null | null | null |
python_Basics.ipynb
|
NoelJoseRoach/Business-Analytics-with-Python
|
20d5e553be1fc43be9b37953a237fd0eaa949081
|
[
"Apache-2.0"
] | null | null | null |
python_Basics.ipynb
|
NoelJoseRoach/Business-Analytics-with-Python
|
20d5e553be1fc43be9b37953a237fd0eaa949081
|
[
"Apache-2.0"
] | null | null | null | 20.536842 | 58 | 0.377755 |
[
[
[
"6+9",
"_____no_output_____"
],
[
"420/42",
"_____no_output_____"
],
[
"5-4",
"_____no_output_____"
],
[
"6%100",
"_____no_output_____"
],
[
"6*9",
"_____no_output_____"
],
[
"9-6",
"_____no_output_____"
],
[
"x=10",
"_____no_output_____"
],
[
"print(x)",
"10\n"
],
[
"kathy=10",
"_____no_output_____"
],
[
"print(kathy)",
"10\n"
],
[
"x='thiruvananthapuram'",
"_____no_output_____"
],
[
"print(x)",
"thiruvananthapuram\n"
],
[
"x=10\ny=20.5\nz='your name'",
"_____no_output_____"
],
[
"print(x,y,z)",
"10 20.5 your name\n"
],
[
"type(y)",
"_____no_output_____"
],
[
"type(z)",
"_____no_output_____"
],
[
"x='ted'",
"_____no_output_____"
],
[
"y='robin'",
"_____no_output_____"
],
[
"print(x+y)",
"tedrobin\n"
],
[
"x=input('Enter a number')",
"Enter a number420\n"
],
[
"print(x)",
"420\n"
],
[
"x=input('enter 1st number')\ny=input('enter 2nd number')",
"enter 1st number69\nenter 2nd number69\n"
],
[
"print(x,y)\nprint(x+y)",
"69 69\n6969\n"
],
[
"type(x)\ntype(y)",
"_____no_output_____"
],
[
"int(x)+int(y)",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a1a5c306ce6468f9a5f098843fabd37b9b88513
| 164,659 |
ipynb
|
Jupyter Notebook
|
openslidertfpy/.ipynb_checkpoints/read_region_example-checkpoint.ipynb
|
OtaYuji/openslider-tfpy
|
19891babb1d2ad85e0a3866939f86e0fdbcce3d8
|
[
"Apache-2.0"
] | 4 |
2020-05-10T11:36:10.000Z
|
2021-12-12T14:22:12.000Z
|
openslidertfpy/.ipynb_checkpoints/read_region_example-checkpoint.ipynb
|
OtaYuji/openslider-tfpy
|
19891babb1d2ad85e0a3866939f86e0fdbcce3d8
|
[
"Apache-2.0"
] | 1 |
2021-03-20T07:20:34.000Z
|
2021-03-20T07:20:34.000Z
|
openslidertfpy/.ipynb_checkpoints/read_region_example-checkpoint.ipynb
|
yujota/openslider-tfpy
|
19891babb1d2ad85e0a3866939f86e0fdbcce3d8
|
[
"Apache-2.0"
] | null | null | null | 848.757732 | 87,508 | 0.95436 |
[
[
[
"import tensorflow as tf\nimport numpy as np\nfrom PIL import Image\n%matplotlib inline\nfrom matplotlib.pyplot import imshow\n\nfrom openslidertfpy import MicroPatchReader, is_mock",
"_____no_output_____"
],
[
"with tf.Graph().as_default():\n coordinator = tf.train.Coordinator()\n reader = MicroPatchReader(\n \"sample.svs\", coordinator, image_width=500, image_height=500, verbose=False\n )\n images, locations, levels = reader.get_inputs()\n results = list()\n \n with tf.Session() as sess:\n locations2read = [(0, 0), (500*16, 0)]\n level2read = 2\n \n reader.start_thread([(l, level2read) for l in locations2read])\n \n while not coordinator.should_stop():\n imgs, ls, vs = sess.run([images, locations, levels])\n results.extend([\n (i, l, v) for i, l, v in zip(imgs, ls, vs)\n if not is_mock(l, v)\n ])",
"_____no_output_____"
],
[
"left_img, left_location, left_level = results[0]\nleft_img = Image.fromarray(left_img.astype(np.uint8))",
"_____no_output_____"
],
[
"imshow(left_img)",
"_____no_output_____"
],
[
"print(left_location, left_level)",
"[0 0] 2\n"
],
[
"right_img, right_location, right_level = results[1]\nright_img = Image.fromarray(right_img.astype(np.uint8))",
"_____no_output_____"
],
[
"imshow(right_img)",
"_____no_output_____"
],
[
"print(right_location, right_level)",
"[8000 0] 2\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a1a6639b7810c7ac5a28fc6783f52b94a25e0bc
| 25,081 |
ipynb
|
Jupyter Notebook
|
notebooks/eICU_experiments.ipynb
|
jvpoulos/dpsom
|
38e2d2835e751341223bb3904be145ed923aab7c
|
[
"MIT"
] | 1 |
2020-05-12T18:27:19.000Z
|
2020-05-12T18:27:19.000Z
|
notebooks/eICU_experiments.ipynb
|
jvpoulos/dpsom
|
38e2d2835e751341223bb3904be145ed923aab7c
|
[
"MIT"
] | null | null | null |
notebooks/eICU_experiments.ipynb
|
jvpoulos/dpsom
|
38e2d2835e751341223bb3904be145ed923aab7c
|
[
"MIT"
] | null | null | null | 33.263926 | 140 | 0.531996 |
[
[
[
"# eICU Experiments",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nimport numpy as np\nimport h5py\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nimport tensorflow_probability as tfp\nimport sklearn\nfrom sklearn import metrics\nimport seaborn as sns\nimport random",
"_____no_output_____"
]
],
[
[
"Follow Read-me instruction to download the medical data.\n\nAfter having downloaded the data in '../data/eICU_data.h5', upload the data:",
"_____no_output_____"
]
],
[
[
"def get_data(test=True):\n hf = h5py.File('../data/eICU_data.h5', 'r')\n data_total = np.array(hf.get('x'))\n endpoints_total = np.array(hf.get('y'))\n hf.close()\n data_train, data_val, y_train, endpoints_total_val = train_test_split(data_total[:int(len(data_total) * 0.85)],\n endpoints_total[:int(len(data_total) * 0.85)],\n test_size=0.20,\n random_state=42)\n if test:\n data_val = data_total[int(len(data_total) * 0.85):]\n endpoints_total_val = endpoints_total[int(len(data_total) * 0.85):]\n return data_train, data_val, y_train, endpoints_total_val",
"_____no_output_____"
],
[
"def batch_generator(data_train, data_val, endpoints_total_val, batch_size, mode=\"train\"):\n while True:\n if mode == \"train\":\n for i in range(len(data_train) // batch_size):\n time_series = data_train[i * batch_size: (i + 1) * batch_size]\n yield time_series, i\n elif mode == \"val\":\n for i in range(len(data_val) // batch_size):\n time_series = data_val[i * batch_size: (i + 1) * batch_size]\n time_series_endpoint = endpoints_total_val[i * batch_size: (i + 1) * batch_size]\n yield time_series, time_series_endpoint, i\n else:\n raise ValueError(\"The mode has to be in {train, val}\")",
"_____no_output_____"
]
],
[
[
"Insert the name of the job in ex_name:",
"_____no_output_____"
]
],
[
[
"ex_name=\"hyperopt_LSTM_20_16-16_2020-02-17_35a17\"",
"_____no_output_____"
]
],
[
[
"Get the data:",
"_____no_output_____"
]
],
[
[
"batch_size=128\nmodelpath = \"../models/{}/{}\".format(ex_name, ex_name)\ndata_train, data_val, endpoints_total_train, endpoints_total_val = get_data(test=True)",
"_____no_output_____"
]
],
[
[
"## Create heat-maps, trajectories and probability distributions",
"_____no_output_____"
]
],
[
[
"som_dim = [16,16]\nlatent_dim=10",
"_____no_output_____"
],
[
"val_gen = batch_generator(data_train, data_val, endpoints_total_val, 300, mode=\"val\")",
"_____no_output_____"
],
[
"num_batches = len(data_val) // 300\ntf.reset_default_graph()\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver = tf.train.import_meta_graph(modelpath+\".meta\")\n saver.restore(sess, modelpath)\n graph = tf.get_default_graph()\n k = graph.get_tensor_by_name(\"k/k:0\")\n z_e = graph.get_tensor_by_name(\"z_e_sample/z_e:0\")\n x = graph.get_tensor_by_name(\"inputs/x:0\")\n is_training = graph.get_tensor_by_name(\"is_training/is_training:0\")\n graph = tf.get_default_graph()\n z_e_p = graph.get_tensor_by_name(\"prediction/next_state/input_lstm:0\")\n q = graph.get_tensor_by_name(\"q/distribution/q:0\")\n embeddings = graph.get_tensor_by_name(\"embeddings/embeddings:0\")\n reconstruction = graph.get_tensor_by_name(\"reconstruction_e/x_hat:0\")\n \n print(\"Evaluation...\")\n test_k_all = []\n labels_val_all = []\n z_e_all=[]\n z_q_all = []\n qq = []\n for i in range(num_batches):\n batch_data, batch_labels, ii = next(val_gen)\n f_dic = {x: batch_data}\n test_k_all.extend(sess.run(k, feed_dict=f_dic))\n labels_val_all.extend(batch_labels)\n z_q_all.extend(sess.run(q, feed_dict=f_dic))\n qq.extend(sess.run(q, feed_dict=f_dic))\n z_e_all.extend(sess.run(z_e, feed_dict=f_dic))\n labels_val_all = np.array(labels_val_all)\n k_all = np.array(test_k_all)\n qq = np.array(qq)\n labels_val_all = np.reshape(labels_val_all, (-1, labels_val_all.shape[-1]))\n NMI_24 = metrics.normalized_mutual_info_score(labels_val_all[:, 3], k_all)\n NMI_12 = metrics.normalized_mutual_info_score(labels_val_all[:, 2], k_all)\n NMI_6 = metrics.normalized_mutual_info_score(labels_val_all[:, 1], k_all)\n NMI_1 = metrics.normalized_mutual_info_score(labels_val_all[:, 0], k_all)\n \n embb = sess.run(embeddings, feed_dict={x: data_val})",
"_____no_output_____"
]
],
[
[
"Get the labels:",
"_____no_output_____"
]
],
[
[
"labels_12 = labels_val_all[:,2]\nlabels_1 = labels_val_all[:,0]\nlabels_6 = labels_val_all[:,1]\nlabels_24 = labels_val_all[:,3]\nhosp_disc_1 = labels_val_all[:,4]\nhosp_disc_6 = labels_val_all[:,5]\nhosp_disc_12 = labels_val_all[:,6]\nhosp_disc_24 = labels_val_all[:,7]\nu_disc_1 = labels_val_all[:,8]\nu_disc_6 = labels_val_all[:,9]\nu_disc_12 = labels_val_all[:,10]\nu_disc_24 = labels_val_all[:, 11]\nlabels_1 = labels_1.astype(int)\nlabels_6 = labels_6.astype(int)\nlabels_12 = labels_12.astype(int)\nlabels_24 = labels_24.astype(int)\nhosp_disc_12 = hosp_disc_12.astype(int)\nhosp_disc_24 = hosp_disc_24.astype(int)\nhosp_disc_1 = hosp_disc_1.astype(int)\nhosp_disc_6 = hosp_disc_6.astype(int)",
"_____no_output_____"
]
],
[
[
"### Moran Index",
"_____no_output_____"
]
],
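For reference, the cell below computes Moran's I over the SOM grid, with $x_i$ the mean label in grid cell $i$, $\bar{x}$ the overall label mean, $N$ the number of cells, and weights $w_{ij} = e^{-(d_1 + d_2)}$ built from toroidal distances on the grid:

$$I = \frac{N}{\sum_i \sum_j w_{ij}} \cdot \frac{\sum_i \sum_j w_{ij}\,(x_i - \bar{x})(x_j - \bar{x})}{\sum_i (x_i - \bar{x})^2}$$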
[
[
"sd = som_dim[0]*som_dim[1]\nmean = np.sum(labels_val_all[:, 0]) / len(labels_val_all[:, 0])\nones = np.ones((len(np.reshape(k_all, (-1)))))\nclust_matr1 = np.zeros(som_dim[0]*som_dim[1])\nlabels= labels_val_all[:, 0]\nfor i in range(som_dim[0]*som_dim[1]):\n dd = np.sum(ones[np.where(np.reshape(k_all, (-1))==i)])\n if dd == 0:\n s1 = 0\n else:\n s1 = np.sum(labels[np.where(np.reshape(k_all, (-1))==i)]) / np.sum(ones[np.where(np.reshape(k_all, (-1))==i)])\n clust_matr1[i] = s1\n\nk = np.arange(0,sd)\nk1 = k // som_dim[0]\nk2 = k % som_dim[0]\nW = np.zeros((sd,sd))\nfor i in range(sd):\n for j in range(sd):\n d1 = np.abs((k1[i] - k1[j]))\n d2 = np.abs((k2[i] - k2[j]))\n d1 = min(som_dim[0]-d1, d1)\n d2 = min(som_dim[0]-d2, d2)\n W[i,j] = np.exp(-(d1+d2))\n if i==j:\n W[i,j]=0\nM = 0\nN_n = 0\nfor i in range(sd):\n for j in range(sd):\n M += (clust_matr1[i] -mean)*(clust_matr1[j] -mean)* W[i,j]\nfor i in range(sd):\n N_n += (clust_matr1[i]-mean)**2\nW_n = np.sum(W)\nI = M * sd / (N_n*W_n)",
"_____no_output_____"
],
[
"print(I)",
"_____no_output_____"
]
],
[
[
"### APACHE score heatmap:",
"_____no_output_____"
]
],
[
[
"labels = labels_1\nones = np.ones((len(np.reshape(k_all, (-1)))))\nclust_matr1 = np.zeros(som_dim[0]*som_dim[1])\nclust_matr2 = np.zeros(som_dim[0]*som_dim[1])\nfor i in range(som_dim[0]*som_dim[1]):\n s1 = np.sum(labels[np.where(np.reshape(k_all, (-1))==i)]) / np.sum(ones[np.where(np.reshape(k_all, (-1))==i)])\n clust_matr1[i] = s1\nclust_matr1 = np.reshape(clust_matr1, (som_dim[0],som_dim[1]))\nax = sns.heatmap(clust_matr1, cmap=\"YlGnBu\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Trajectories",
"_____no_output_____"
]
],
[
[
"T = []\nS = []\nfor i in range(1000):\n h = np.reshape(u_disc_1, (-1,72))\n if np.max(h[i]) == 1:\n T.append(i)\n else:\n S.append(i)",
"_____no_output_____"
],
[
"ind_r = np.random.random_integers(0, 50, 10)\nind_s = np.random.random_integers(0, 50, 10)\nT = np.array(T)\nS = np.array(S)\na = np.concatenate([S[ind_s], T[ind_r]])",
"_____no_output_____"
],
[
"k_all.shape",
"_____no_output_____"
],
[
"labels = labels_1\nit = 0\nk_all = np.reshape(k_all, (-1,72))\nfig, ax = plt.subplots(5, 4, figsize=(50,43)) \nones = np.ones((len(np.reshape(k_all, (-1)))))\nclust_matr1 = np.zeros(som_dim[0]*som_dim[1])\nclust_matr2 = np.zeros(som_dim[0]*som_dim[1])\nfor i in range(som_dim[0]*som_dim[1]):\n s1 = np.sum(labels[np.where(np.reshape(k_all, (-1)) == i)]) / np.sum(ones[np.where(np.reshape(k_all, (-1))==i)])\n clust_matr1[i] = s1\nclust_matr1 = np.reshape(clust_matr1, (som_dim[0],som_dim[1]))\nfor t in a:\n #fig, ax = plt.subplots(figsize=(10,7.5)) \n if it > 9:\n c = \"r\"\n #print(t)\n else:\n c = \"g\"\n cc = it % 4\n rr = it // 4\n g = sns.heatmap(clust_matr1, cmap=\"YlGnBu\",ax=ax[rr][cc])\n k_1 = k_all[t] // som_dim[1]\n k_2 = k_all[t] % som_dim[1]\n ax[rr][cc].plot(k_2[:] + 0.5, k_1[:] + 0.5, color=c, linewidth=4)\n ax[rr][cc].scatter(k_2[0] + 0.5, k_1[0] + 0.5, color=c, s=200, label='Start')\n ax[rr][cc].scatter(k_2[1:-1] + 0.5, k_1[1:-1] + 0.5, color=c, linewidth=5, marker='.')\n ax[rr][cc].scatter(k_2[-1] + 0.5, k_1[-1] + 0.5, color=c, s=500, linewidth=4, marker='x', label='End')\n ax[rr][cc].legend(loc=2, prop={'size': 20})\n it +=1\nplt.show()",
"_____no_output_____"
]
],
[
[
"Probability distribution over trajectory:",
"_____no_output_____"
]
],
[
[
"qq.shape",
"_____no_output_____"
],
[
"prob_q = np.reshape(qq, (-1, 72, som_dim[0]*som_dim[1])) \ni = np.random.randint(0, 50) #Randomly sampled patient\nit = 0\nfig, ax = plt.subplots(2, 3, figsize=(50,25))\nk_all = np.reshape(k_all, (-1,72))\nfor t in [0, 17, 40, 57, 64, 71]:\n cc = it % 3\n rr = it // 3\n k_1 = k_all[i] // som_dim[1]\n k_2 = k_all[i] % som_dim[1]\n c = \"black\"\n g1 = sns.heatmap(np.reshape(prob_q[i, t], (som_dim[0],som_dim[1])), cmap='Reds', alpha=1, ax=ax[rr][cc])\n ax[rr][cc].plot(k_2[:] + 0.5, k_1[:] + 0.5, color=c, linewidth=6)\n ax[rr][cc].scatter(k_2[0] + 0.5, k_1[0] + 0.5, color=c, s=800, label='Start')\n ax[rr][cc].scatter(k_2[1:-1] + 0.5, k_1[1:-1] + 0.5, color=c, linewidth=10, marker='.')\n ax[rr][cc].scatter(k_2[-1] + 0.5, k_1[-1] + 0.5, color=c, s=1200, linewidth=10, marker='x', label='End')\n ax[rr][cc].legend(loc=2, prop={'size': 30}) \n ax[rr][cc].set_title(\"Time-step = {}\".format(it*14), fontsize=40)\n it +=1\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Unrolling future time-steps and prediction",
"_____no_output_____"
]
],
[
[
"def z_dist_flat(z_e, embeddings):\n \"\"\"Computes the distances between the encodings and the embeddings.\"\"\"\n emb = np.reshape(embeddings, (som_dim[0]*som_dim[1], -1))\n z = np.reshape(z_e, (z_e.shape[0], 1, latent_dim))\n z = np.tile(z, [1,som_dim[0]*som_dim[1], 1])\n z_dist = np.square(z-emb)\n z_dist_red = np.sum(z_dist, axis=-1)\n return z_dist_red",
"_____no_output_____"
],
[
"val_gen = batch_generator(data_train, data_val, endpoints_total_val, 300, mode=\"val\")",
"_____no_output_____"
],
[
"tf.reset_default_graph()\nnum_batches = len(data_val) // 300\nlatent_dim = 20\nnum_pred = 6\nsom = 16*16\nmax_n_step = 72\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver = tf.train.import_meta_graph(modelpath+\".meta\")\n saver.restore(sess, modelpath)\n graph = tf.get_default_graph()\n k = graph.get_tensor_by_name(\"k/k:0\")\n z_e = graph.get_tensor_by_name(\"z_e_sample/z_e:0\")\n next_z_e = graph.get_tensor_by_name(\"prediction/next_z_e:0\")\n x = graph.get_tensor_by_name(\"inputs/x:0\")\n is_training = graph.get_tensor_by_name(\"is_training/is_training:0\")\n graph = tf.get_default_graph()\n init_1 = graph.get_tensor_by_name(\"prediction/next_state/init_state:0\")\n z_e_p = graph.get_tensor_by_name(\"prediction/next_state/input_lstm:0\")\n state1 = graph.get_tensor_by_name(\"prediction/next_state/next_state:0\")\n q = graph.get_tensor_by_name(\"q/distribution/q:0\")\n embeddings = graph.get_tensor_by_name(\"embeddings/embeddings:0\")\n z_p = graph.get_tensor_by_name('reconstruction_e/decoder/z_e:0')\n reconstruction = graph.get_tensor_by_name(\"reconstruction_e/x_hat:0\")\n \n print(\"Evaluation...\")\n training_dic = {is_training: True, z_e_p: np.zeros((max_n_step * len(data_val), latent_dim)),\n init_1: np.zeros((2, batch_size, 100)), z_p: np.zeros((max_n_step * len(data_val), latent_dim))}\n k_all = []\n z_e_all=[]\n z_q_all = []\n qq = []\n x_rec = []\n for i in range(num_batches):\n batch_data, batch_labels, ii = next(val_gen)\n f_dic = {x: batch_data}\n k_all.extend(sess.run(k, feed_dict=f_dic))\n z_q_all.extend(sess.run(q, feed_dict=f_dic))\n z_e_all.extend(sess.run(z_e, feed_dict=f_dic))\n qq.extend(sess.run(q, feed_dict=f_dic))\n f_dic.update(training_dic)\n x_rec.extend(sess.run(reconstruction, feed_dict=f_dic))\n z_e_all = np.array(z_e_all)\n k_all = np.array(k_all)\n qq = np.array(qq)\n x_rec = np.array(x_rec)\n z_e_all = z_e_all.reshape((-1, max_n_step, latent_dim))\n k_all = k_all.reshape((-1, max_n_step))\n \n t = 72-num_pred\n \n embeddings = sess.run(embeddings, feed_dict={x: data_val[:, :t, :]})\n embeddings = np.reshape(embeddings,(-1, latent_dim))\n \n z_e_o = z_e_all[:, :t, :]\n k_o = k_all[:, :t]\n k_eval=[]\n next_z_e_o = []\n state1_o =[]\n for i in range(num_batches):\n batch_data, batch_labels, ii = next(val_gen)\n batch_data=batch_data[:, :t, :]\n f_dic = {x: batch_data}\n f_dic.update(training_dic)\n next_z_e_o.extend(sess.run(next_z_e, feed_dict=f_dic))\n if i == 0:\n state1_o = sess.run(state1, feed_dict=f_dic)\n else:\n state1_o = np.concatenate([state1_o, sess.run(state1, feed_dict=f_dic)], axis=1)\n next_z_e_o = np.array(next_z_e_o)\n state1_o = np.array(state1_o)\n \n next_z_e_o_all = np.reshape(next_z_e_o[:, -1, :], (-1,1,latent_dim))\n next_z_e_o = next_z_e_o[:, -1, :]\n k_next = np.argmin(z_dist_flat(next_z_e_o, embeddings), axis=-1)\n k_o = np.concatenate([k_o, np.expand_dims(k_next,1)], axis=1)\n z_e_o = np.concatenate([z_e_o, np.expand_dims(next_z_e_o, 1)], axis=1)\n f_dic = {x: np.zeros((len(data_val),1, 98)), is_training: False, z_e_p: np.zeros((1 * len(data_val), latent_dim)),\n z_p: next_z_e_o, init_1: np.zeros((2, batch_size, 100))}\n x_pred_hat = np.reshape(sess.run(reconstruction, feed_dict=f_dic), (-1, 1, 98))\n \n for i in range(num_pred-1):\n print(i)\n inp = data_val[:1500, (t + i), :]\n f_dic = {x: np.reshape(inp, (inp.shape[0],1,inp.shape[1]))}\n val_dic = {is_training: False, z_e_p: next_z_e_o, init_1: state1_o, z_p: np.zeros((max_n_step * len(inp), latent_dim))}\n f_dic.update(val_dic)\n next_z_e_o 
= sess.run(next_z_e, feed_dict=f_dic)\n state1_o = sess.run(state1, feed_dict=f_dic)\n next_z_e_o_all = np.concatenate([next_z_e_o_all, next_z_e_o], axis=1)\n k_next = np.argmin(z_dist_flat(next_z_e_o, embeddings), axis=-1)\n k_o = np.concatenate([k_o, np.expand_dims(k_next,1)], axis=1)\n z_e_o = np.concatenate([z_e_o, next_z_e_o], axis=1)\n next_z_e_o = np.reshape(next_z_e_o, (-1, latent_dim))\n f_dic = {x: np.zeros((len(data_val),1, 98)), is_training: False, z_e_p: np.zeros((max_n_step * len(data_val), latent_dim)),\n z_p: next_z_e_o, init_1: np.zeros((2, batch_size, 100))}\n final_x = sess.run(reconstruction, feed_dict=f_dic)\n x_pred_hat = np.concatenate([x_pred_hat, np.reshape(final_x, (-1, 1, 98))], axis = 1)\n \n f_dic = {x: np.zeros((1500,1, 98)), is_training: False, z_e_p: np.zeros((max_n_step * 1500, latent_dim)),\n z_p: z_e_all[:, t-1, :], init_1: np.zeros((2, batch_size, 100))}\n final_x = sess.run(reconstruction, feed_dict=f_dic)",
"_____no_output_____"
],
[
"sklearn.metrics.mean_squared_error(np.reshape(x_pred_hat, (-1, 98)), np.reshape(data_val[:1500, -num_pred:], (-1, 98)))",
"_____no_output_____"
]
],
[
[
"Accuracy of unrolled state:",
"_____no_output_____"
]
],
[
[
"k_true = np.reshape(k_all[:, -num_pred:], (-1))\nk_pred = np.reshape(k_o[:, -num_pred:], (-1))\ntot = 0\nacc = 0\nfor i in range(len(k_true)):\n tot += 1\n if k_true[i] == k_pred[i]:\n acc += 1\nacc = acc / tot\nacc",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a1a7e3638c20ccf79cc731f2b1ef0977284a820
| 127,975 |
ipynb
|
Jupyter Notebook
|
Section-03-Metrics/03-06-Precision-Recall-Curves.ipynb
|
bkiselgof/machine-learning-imbalanced-data
|
a5a4b8613411e42c041c103b72394b53c9fa0d62
|
[
"BSD-3-Clause"
] | null | null | null |
Section-03-Metrics/03-06-Precision-Recall-Curves.ipynb
|
bkiselgof/machine-learning-imbalanced-data
|
a5a4b8613411e42c041c103b72394b53c9fa0d62
|
[
"BSD-3-Clause"
] | null | null | null |
Section-03-Metrics/03-06-Precision-Recall-Curves.ipynb
|
bkiselgof/machine-learning-imbalanced-data
|
a5a4b8613411e42c041c103b72394b53c9fa0d62
|
[
"BSD-3-Clause"
] | null | null | null | 215.084034 | 25,820 | 0.901434 |
[
[
[
"## Precision-Recall-Curves",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.metrics import plot_precision_recall_curve\n\nfrom yellowbrick.classifier import PrecisionRecallCurve",
"_____no_output_____"
]
],
[
[
"## Load data",
"_____no_output_____"
]
],
[
[
"# load data\ndata = pd.read_csv('../kdd2004.csv')\n\n# remap target class to 0 and 1\ndata['target'] = data['target'].map({-1:0, 1:1})\n\ndata.head()",
"_____no_output_____"
],
[
"# data size\n\ndata.shape",
"_____no_output_____"
],
[
"# imbalanced target\n\ndata.target.value_counts() / len(data)",
"_____no_output_____"
],
[
"# separate dataset into train and test\n\nX_train, X_test, y_train, y_test = train_test_split(\n data.drop(labels=['target'], axis=1), # drop the target\n data['target'], # just the target\n test_size=0.3,\n random_state=0)\n\nX_train.shape, X_test.shape",
"_____no_output_____"
]
],
[
[
"## Train ML models\n\n### Random Forests",
"_____no_output_____"
]
],
[
[
"rf = RandomForestClassifier(n_estimators=100, random_state=39, max_depth=2, n_jobs=4)\n\nrf.fit(X_train, y_train)\n\ny_train_rf = rf.predict_proba(X_train)[:,1]\ny_test_rf = rf.predict_proba(X_test)[:,1]",
"_____no_output_____"
]
],
[
[
"### Logistic Regression",
"_____no_output_____"
]
],
[
[
"logit = LogisticRegression(random_state=0, max_iter=1000)\n\nlogit.fit(X_train, y_train)\n\ny_train_logit = logit.predict_proba(X_train)[:,1]\ny_test_logit = logit.predict_proba(X_test)[:,1]",
"/home/bk/anaconda3/lib/python3.8/site-packages/sklearn/linear_model/_logistic.py:762: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\n"
]
],
[
[
"## Precision-Recall Curve\n\n### Sklearn\n\nhttps://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_precision_recall_curve.html#sklearn.metrics.plot_precision_recall_curve",
"_____no_output_____"
]
],
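Under the hood, these plotting helpers are built from `sklearn.metrics.precision_recall_curve`; a minimal sketch using the probability scores computed above:

```python
from sklearn.metrics import precision_recall_curve, auc

# Precision/recall pairs over all thresholds of the RF scores from above,
# summarized by the area under the curve.
precision, recall, _ = precision_recall_curve(y_test, y_test_rf)
print("RF PR-AUC: %.3f" % auc(recall, precision))
```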
[
[
"rf_disp = plot_precision_recall_curve(rf, X_test, y_test)\nlogit_disp = plot_precision_recall_curve(logit, X_test, y_test)",
"findfont: Font family ['sans-serif'] not found. Falling back to DejaVu Sans.\nfindfont: Font family ['sans-serif'] not found. Falling back to DejaVu Sans.\n"
],
[
"ax = plt.gca()\nrf_disp.plot(ax=ax, alpha=0.8)\nlogit_disp.plot(ax=ax, alpha=0.8)",
"_____no_output_____"
]
],
[
[
"### Yellobrick\n\nhttps://www.scikit-yb.org/en/latest/api/classifier/prcurve.html",
"_____no_output_____"
]
],
[
[
"visualizer = PrecisionRecallCurve(rf, classes=[0, 1])\n\nvisualizer.fit(X_train, y_train) # Fit the training data to the visualizer\nvisualizer.score(X_test, y_test) # Evaluate the model on the test data\nvisualizer.show() # Finalize and show the figure",
"findfont: Font family ['sans-serif'] not found. Falling back to DejaVu Sans.\n"
],
[
"visualizer = PrecisionRecallCurve(logit, classes=[0, 1])\n\nvisualizer.fit(X_train, y_train) # Fit the training data to the visualizer\nvisualizer.score(X_test, y_test) # Evaluate the model on the test data\nvisualizer.show() # Finalize and show the figure",
"_____no_output_____"
]
],
[
[
"## Additional Reading\n\nhttps://towardsdatascience.com/on-roc-and-precision-recall-curves-c23e9b63820c",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
4a1a887a795af85dc7e8e3f5591738eff481083e
| 210,881 |
ipynb
|
Jupyter Notebook
|
2. Improving Deep Neural Networks Hyperparameter tuning, Regularization and Optimization/week7/Tensorflow Tutorial.ipynb
|
adityajn105/Coursera-Deep-Learning-Specialization
|
26cf7da29b2f1cb32799e045cc9cdfab99ad0757
|
[
"Unlicense"
] | 2 |
2020-08-21T03:59:01.000Z
|
2020-09-05T13:13:19.000Z
|
2. Improving Deep Neural Networks Hyperparameter tuning, Regularization and Optimization/week7/Tensorflow Tutorial.ipynb
|
adityajn105/Coursera-Deep-Learning-Specialization
|
26cf7da29b2f1cb32799e045cc9cdfab99ad0757
|
[
"Unlicense"
] | null | null | null |
2. Improving Deep Neural Networks Hyperparameter tuning, Regularization and Optimization/week7/Tensorflow Tutorial.ipynb
|
adityajn105/Coursera-Deep-Learning-Specialization
|
26cf7da29b2f1cb32799e045cc9cdfab99ad0757
|
[
"Unlicense"
] | null | null | null | 127.884172 | 118,292 | 0.840242 |
[
[
[
"# TensorFlow Tutorial\n\nWelcome to this week's programming assignment. Until now, you've always used numpy to build neural networks. Now we will step you through a deep learning framework that will allow you to build neural networks more easily. Machine learning frameworks like TensorFlow, PaddlePaddle, Torch, Caffe, Keras, and many others can speed up your machine learning development significantly. All of these frameworks also have a lot of documentation, which you should feel free to read. In this assignment, you will learn to do the following in TensorFlow: \n\n- Initialize variables\n- Start your own session\n- Train algorithms \n- Implement a Neural Network\n\nPrograming frameworks can not only shorten your coding time, but sometimes also perform optimizations that speed up your code. \n\n## 1 - Exploring the Tensorflow Library\n\nTo start, you will import the library:\n",
"_____no_output_____"
]
],
[
[
"import math\nimport numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nfrom tf_utils import load_dataset, random_mini_batches, convert_to_one_hot, predict\n\n\n%matplotlib inline\nnp.random.seed(1)",
"_____no_output_____"
]
],
[
[
"Now that you have imported the library, we will walk you through its different applications. You will start with an example, where we compute for you the loss of one training example. \n$$loss = \\mathcal{L}(\\hat{y}, y) = (\\hat y^{(i)} - y^{(i)})^2 \\tag{1}$$",
"_____no_output_____"
]
],
[
[
"y_hat = tf.constant(36, name='y_hat') # Define y_hat constant. Set to 36.\ny = tf.constant(39, name='y') # Define y. Set to 39\n\nloss = tf.Variable((y - y_hat)**2, name='loss') # Create a variable for the loss\n\ninit = tf.global_variables_initializer() # When init is run later (session.run(init)),\n # the loss variable will be initialized and ready to be computed\nwith tf.Session() as session: # Create a session and print the output\n session.run(init) # Initializes the variables\n print(session.run(loss)) # Prints the loss",
"9\n"
]
],
[
[
"Writing and running programs in TensorFlow has the following steps:\n\n1. Create Tensors (variables) that are not yet executed/evaluated. \n2. Write operations between those Tensors.\n3. Initialize your Tensors. \n4. Create a Session. \n5. Run the Session. This will run the operations you'd written above. \n\nTherefore, when we created a variable for the loss, we simply defined the loss as a function of other quantities, but did not evaluate its value. To evaluate it, we had to run `init=tf.global_variables_initializer()`. That initialized the loss variable, and in the last line we were finally able to evaluate the value of `loss` and print its value.\n\nNow let us look at an easy example. Run the cell below:",
"_____no_output_____"
]
],
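As a compact illustration (using the same TF1 API as the rest of this notebook), the five steps map onto code roughly like this:

```python
import tensorflow as tf

a = tf.constant(3.0)                      # 1. create tensors
b = tf.Variable(2.0)
y = a * b                                 # 2. write operations between them
init = tf.global_variables_initializer()  # 3. initialize your tensors
with tf.Session() as sess:                # 4. create a session
    sess.run(init)
    print(sess.run(y))                    # 5. run the session -> prints 6.0
```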
[
[
"a = tf.constant(2)\nb = tf.constant(10)\nc = tf.multiply(a,b)\nprint(c)",
"Tensor(\"Mul_1:0\", shape=(), dtype=int32)\n"
]
],
[
[
"As expected, you will not see 20! You got a tensor saying that the result is a tensor that does not have the shape attribute, and is of type \"int32\". All you did was put in the 'computation graph', but you have not run this computation yet. In order to actually multiply the two numbers, you will have to create a session and run it.",
"_____no_output_____"
]
],
[
[
"sess = tf.Session()\nprint(sess.run(c))",
"20\n"
]
],
[
[
"Great! To summarize, **remember to initialize your variables, create a session and run the operations inside the session**. \n\nNext, you'll also have to know about placeholders. A placeholder is an object whose value you can specify only later. \nTo specify values for a placeholder, you can pass in values by using a \"feed dictionary\" (`feed_dict` variable). Below, we created a placeholder for x. This allows us to pass in a number later when we run the session. ",
"_____no_output_____"
]
],
[
[
"# Change the value of x in the feed_dict\n\nx = tf.placeholder(tf.int64, name = 'x')\nprint(sess.run(2 * x, feed_dict = {x: 3}))\nsess.close()",
"6\n"
]
],
[
[
"When you first defined `x` you did not have to specify a value for it. A placeholder is simply a variable that you will assign data to only later, when running the session. We say that you **feed data** to these placeholders when running the session. \n\nHere's what's happening: When you specify the operations needed for a computation, you are telling TensorFlow how to construct a computation graph. The computation graph can have some placeholders whose values you will specify only later. Finally, when you run the session, you are telling TensorFlow to execute the computation graph.",
"_____no_output_____"
],
[
"### 1.1 - Linear function\n\nLets start this programming exercise by computing the following equation: $Y = WX + b$, where $W$ and $X$ are random matrices and b is a random vector. \n\n**Exercise**: Compute $WX + b$ where $W, X$, and $b$ are drawn from a random normal distribution. W is of shape (4, 3), X is (3,1) and b is (4,1). As an example, here is how you would define a constant X that has shape (3,1):\n```python\nX = tf.constant(np.random.randn(3,1), name = \"X\")\n\n```\nYou might find the following functions helpful: \n- tf.matmul(..., ...) to do a matrix multiplication\n- tf.add(..., ...) to do an addition\n- np.random.randn(...) to initialize randomly\n",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: linear_function\n\ndef linear_function():\n \"\"\"\n Implements a linear function: \n Initializes W to be a random tensor of shape (4,3)\n Initializes X to be a random tensor of shape (3,1)\n Initializes b to be a random tensor of shape (4,1)\n Returns: \n result -- runs the session for Y = WX + b \n \"\"\"\n \n np.random.seed(1)\n \n ### START CODE HERE ### (4 lines of code)\n X = np.random.randn(3, 1)\n W = np.random.randn(4, 3)\n b = np.random.randn(4, 1)\n Y = tf.add(tf.matmul(W, X), b)\n ### END CODE HERE ### \n \n # Create the session using tf.Session() and run it with sess.run(...) on the variable you want to calculate\n \n ### START CODE HERE ###\n sess = tf.Session()\n result = sess.run( Y )\n ### END CODE HERE ### \n \n # close the session \n sess.close()\n\n return result",
"_____no_output_____"
],
[
"print( \"result = \" + str(linear_function()))",
"result = [[-2.15657382]\n [ 2.95891446]\n [-1.08926781]\n [-0.84538042]]\n"
]
],
[
[
"*** Expected Output ***: \n\n<table> \n<tr> \n<td>\n**result**\n</td>\n<td>\n[[-2.15657382]\n [ 2.95891446]\n [-1.08926781]\n [-0.84538042]]\n</td>\n</tr> \n\n</table> ",
"_____no_output_____"
],
[
"### 1.2 - Computing the sigmoid \nGreat! You just implemented a linear function. Tensorflow offers a variety of commonly used neural network functions like `tf.sigmoid` and `tf.softmax`. For this exercise lets compute the sigmoid function of an input. \n\nYou will do this exercise using a placeholder variable `x`. When running the session, you should use the feed dictionary to pass in the input `z`. In this exercise, you will have to (i) create a placeholder `x`, (ii) define the operations needed to compute the sigmoid using `tf.sigmoid`, and then (iii) run the session. \n\n** Exercise **: Implement the sigmoid function below. You should use the following: \n\n- `tf.placeholder(tf.float32, name = \"...\")`\n- `tf.sigmoid(...)`\n- `sess.run(..., feed_dict = {x: z})`\n\n\nNote that there are two typical ways to create and use sessions in tensorflow: \n\n**Method 1:**\n```python\nsess = tf.Session()\n# Run the variables initialization (if needed), run the operations\nresult = sess.run(..., feed_dict = {...})\nsess.close() # Close the session\n```\n**Method 2:**\n```python\nwith tf.Session() as sess: \n # run the variables initialization (if needed), run the operations\n result = sess.run(..., feed_dict = {...})\n # This takes care of closing the session for you :)\n```\n",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: sigmoid\n\ndef sigmoid(z):\n \"\"\"\n Computes the sigmoid of z\n \n Arguments:\n z -- input value, scalar or vector\n \n Returns: \n results -- the sigmoid of z\n \"\"\"\n \n ### START CODE HERE ### ( approx. 4 lines of code)\n # Create a placeholder for x. Name it 'x'.\n x = tf.placeholder(tf.float32,name=\"x\")\n\n # compute sigmoid(x)\n sigmoid = tf.sigmoid(x)\n\n # Create a session, and run it. Please use the method 2 explained above. \n # You should use a feed_dict to pass z's value to x. \n with tf.Session() as sess:\n # Run session and call the output \"result\"\n result = sess.run( sigmoid, feed_dict={x:z} )\n \n ### END CODE HERE ###\n \n return result",
"_____no_output_____"
],
[
"print (\"sigmoid(0) = \" + str(sigmoid(0)))\nprint (\"sigmoid(12) = \" + str(sigmoid(12)))",
"sigmoid(0) = 0.5\nsigmoid(12) = 0.999994\n"
]
],
[
[
"*** Expected Output ***: \n\n<table> \n<tr> \n<td>\n**sigmoid(0)**\n</td>\n<td>\n0.5\n</td>\n</tr>\n<tr> \n<td>\n**sigmoid(12)**\n</td>\n<td>\n0.999994\n</td>\n</tr> \n\n</table> ",
"_____no_output_____"
],
[
"<font color='blue'>\n**To summarize, you how know how to**:\n1. Create placeholders\n2. Specify the computation graph corresponding to operations you want to compute\n3. Create the session\n4. Run the session, using a feed dictionary if necessary to specify placeholder variables' values. ",
"_____no_output_____"
],
[
"### 1.3 - Computing the Cost\n\nYou can also use a built-in function to compute the cost of your neural network. So instead of needing to write code to compute this as a function of $a^{[2](i)}$ and $y^{(i)}$ for i=1...m: \n$$ J = - \\frac{1}{m} \\sum_{i = 1}^m \\large ( \\small y^{(i)} \\log a^{ [2] (i)} + (1-y^{(i)})\\log (1-a^{ [2] (i)} )\\large )\\small\\tag{2}$$\n\nyou can do it in one line of code in tensorflow!\n\n**Exercise**: Implement the cross entropy loss. The function you will use is: \n\n\n- `tf.nn.sigmoid_cross_entropy_with_logits(logits = ..., labels = ...)`\n\nYour code should input `z`, compute the sigmoid (to get `a`) and then compute the cross entropy cost $J$. All this can be done using one call to `tf.nn.sigmoid_cross_entropy_with_logits`, which computes\n\n$$- \\frac{1}{m} \\sum_{i = 1}^m \\large ( \\small y^{(i)} \\log \\sigma(z^{[2](i)}) + (1-y^{(i)})\\log (1-\\sigma(z^{[2](i)})\\large )\\small\\tag{2}$$\n\n",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: cost\n\ndef cost(logits, labels):\n \"\"\"\n Computes the cost using the sigmoid cross entropy\n \n Arguments:\n logits -- vector containing z, output of the last linear unit (before the final sigmoid activation)\n labels -- vector of labels y (1 or 0) \n \n Note: What we've been calling \"z\" and \"y\" in this class are respectively called \"logits\" and \"labels\" \n in the TensorFlow documentation. So logits will feed into z, and labels into y. \n \n Returns:\n cost -- runs the session of the cost (formula (2))\n \"\"\"\n \n ### START CODE HERE ### \n \n # Create the placeholders for \"logits\" (z) and \"labels\" (y) (approx. 2 lines)\n z = tf.placeholder(tf.float32, name=\"z\")\n y = tf.placeholder(tf.float32, name=\"y\")\n \n # Use the loss function (approx. 1 line)\n cost = tf.nn.sigmoid_cross_entropy_with_logits(logits=z, labels=y)\n \n # Create a session (approx. 1 line). See method 1 above.\n sess = tf.Session()\n \n # Run the session (approx. 1 line).\n cost = sess.run(cost, feed_dict={ z:logits, y:labels })\n \n # Close the session (approx. 1 line). See method 1 above.\n sess.close()\n \n ### END CODE HERE ###\n \n return cost",
"_____no_output_____"
],
[
"logits = sigmoid(np.array([0.2,0.4,0.7,0.9]))\ncost = cost(logits, np.array([0,0,1,1]))\nprint (\"cost = \" + str(cost))",
"cost = [ 1.00538719 1.03664088 0.41385433 0.39956614]\n"
]
],
[
[
"** Expected Output** : \n\n<table> \n <tr> \n <td>\n **cost**\n </td>\n <td>\n [ 1.00538719 1.03664088 0.41385433 0.39956614]\n </td>\n </tr>\n\n</table>",
"_____no_output_____"
],
[
"### 1.4 - Using One Hot encodings\n\nMany times in deep learning you will have a y vector with numbers ranging from 0 to C-1, where C is the number of classes. If C is for example 4, then you might have the following y vector which you will need to convert as follows:\n\n\n<img src=\"images/onehot.png\" style=\"width:600px;height:150px;\">\n\nThis is called a \"one hot\" encoding, because in the converted representation exactly one element of each column is \"hot\" (meaning set to 1). To do this conversion in numpy, you might have to write a few lines of code. In tensorflow, you can use one line of code: \n\n- tf.one_hot(labels, depth, axis) \n\n**Exercise:** Implement the function below to take one vector of labels and the total number of classes $C$, and return the one hot encoding. Use `tf.one_hot()` to do this. ",
"_____no_output_____"
]
],
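For contrast with the one-line `tf.one_hot` call, the same conversion can be written in a few lines of plain NumPy; this sketch produces the (C, m) layout shown in Figure 1.

```python
import numpy as np

labels = np.array([1, 2, 3, 0, 2, 1])
C = 4
# Row i of np.eye(C) is the one-hot vector for class i; transpose to (C, m)
one_hot_np = np.eye(C)[labels].T   # entry (i, j) is 1 iff example j has label i
print(one_hot_np)
```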
[
[
"# GRADED FUNCTION: one_hot_matrix\n\ndef one_hot_matrix(labels, C):\n \"\"\"\n Creates a matrix where the i-th row corresponds to the ith class number and the jth column\n corresponds to the jth training example. So if example j had a label i. Then entry (i,j) \n will be 1. \n \n Arguments:\n labels -- vector containing the labels \n C -- number of classes, the depth of the one hot dimension\n \n Returns: \n one_hot -- one hot matrix\n \"\"\"\n \n ### START CODE HERE ###\n \n # Create a tf.constant equal to C (depth), name it 'C'. (approx. 1 line)\n C = tf.constant( C, name='C' ) \n \n # Use tf.one_hot, be careful with the axis (approx. 1 line)\n one_hot_matrix = tf.one_hot( labels, C, axis=0 )\n \n # Create the session (approx. 1 line)\n sess = tf.Session()\n \n # Run the session (approx. 1 line)\n one_hot = sess.run(one_hot_matrix)\n \n # Close the session (approx. 1 line). See method 1 above.\n sess.close()\n \n ### END CODE HERE ###\n \n return one_hot",
"_____no_output_____"
],
[
"labels = np.array([1,2,3,0,2,1])\none_hot = one_hot_matrix(labels, C = 4)\nprint (\"one_hot = \" + str(one_hot))",
"one_hot = [[ 0. 0. 0. 1. 0. 0.]\n [ 1. 0. 0. 0. 0. 1.]\n [ 0. 1. 0. 0. 1. 0.]\n [ 0. 0. 1. 0. 0. 0.]]\n"
]
],
[
[
"**Expected Output**: \n\n<table> \n <tr> \n <td>\n **one_hot**\n </td>\n <td>\n [[ 0. 0. 0. 1. 0. 0.]\n [ 1. 0. 0. 0. 0. 1.]\n [ 0. 1. 0. 0. 1. 0.]\n [ 0. 0. 1. 0. 0. 0.]]\n </td>\n </tr>\n\n</table>\n",
"_____no_output_____"
],
[
"### 1.5 - Initialize with zeros and ones\n\nNow you will learn how to initialize a vector of zeros and ones. The function you will be calling is `tf.ones()`. To initialize with zeros you could use tf.zeros() instead. These functions take in a shape and return an array of dimension shape full of zeros and ones respectively. \n\n**Exercise:** Implement the function below to take in a shape and to return an array (of the shape's dimension of ones). \n\n - tf.ones(shape)\n",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: ones\n\ndef ones(shape):\n \"\"\"\n Creates an array of ones of dimension shape\n \n Arguments:\n shape -- shape of the array you want to create\n \n Returns: \n ones -- array containing only ones\n \"\"\"\n \n ### START CODE HERE ###\n \n # Create \"ones\" tensor using tf.ones(...). (approx. 1 line)\n ones = tf.ones(shape)\n \n # Create the session (approx. 1 line)\n sess = tf.Session()\n \n # Run the session to compute 'ones' (approx. 1 line)\n ones = sess.run(ones)\n \n # Close the session (approx. 1 line). See method 1 above.\n sess.close()\n \n ### END CODE HERE ###\n return ones",
"_____no_output_____"
],
[
"print (\"ones = \" + str(ones([3])))",
"ones = [ 1. 1. 1.]\n"
]
],
[
[
"**Expected Output:**\n\n<table> \n <tr> \n <td>\n **ones**\n </td>\n <td>\n [ 1. 1. 1.]\n </td>\n </tr>\n\n</table>",
"_____no_output_____"
],
[
"# 2 - Building your first neural network in tensorflow\n\nIn this part of the assignment you will build a neural network using tensorflow. Remember that there are two parts to implement a tensorflow model:\n\n- Create the computation graph\n- Run the graph\n\nLet's delve into the problem you'd like to solve!\n\n### 2.0 - Problem statement: SIGNS Dataset\n\nOne afternoon, with some friends we decided to teach our computers to decipher sign language. We spent a few hours taking pictures in front of a white wall and came up with the following dataset. It's now your job to build an algorithm that would facilitate communications from a speech-impaired person to someone who doesn't understand sign language.\n\n- **Training set**: 1080 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (180 pictures per number).\n- **Test set**: 120 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (20 pictures per number).\n\nNote that this is a subset of the SIGNS dataset. The complete dataset contains many more signs.\n\nHere are examples for each number, and how an explanation of how we represent the labels. These are the original pictures, before we lowered the image resolutoion to 64 by 64 pixels.\n<img src=\"images/hands.png\" style=\"width:800px;height:350px;\"><caption><center> <u><font color='purple'> **Figure 1**</u><font color='purple'>: SIGNS dataset <br> <font color='black'> </center>\n\n\nRun the following code to load the dataset.",
"_____no_output_____"
]
],
[
[
"# Loading the dataset\nX_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()",
"_____no_output_____"
]
],
[
[
"Change the index below and run the cell to visualize some examples in the dataset.",
"_____no_output_____"
]
],
[
[
"# Example of a picture\nindex = 0\nplt.imshow(X_train_orig[index])\nprint (\"y = \" + str(np.squeeze(Y_train_orig[:, index])))",
"y = 5\n"
]
],
[
[
"As usual you flatten the image dataset, then normalize it by dividing by 255. On top of that, you will convert each label to a one-hot vector as shown in Figure 1. Run the cell below to do so.",
"_____no_output_____"
]
],
[
[
"# Flatten the training and test images\nX_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T\nX_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T\n# Normalize image vectors\nX_train = X_train_flatten/255.\nX_test = X_test_flatten/255.\n# Convert training and test labels to one hot matrices\nY_train = convert_to_one_hot(Y_train_orig, 6)\nY_test = convert_to_one_hot(Y_test_orig, 6)\n\nprint (\"number of training examples = \" + str(X_train.shape[1]))\nprint (\"number of test examples = \" + str(X_test.shape[1]))\nprint (\"X_train shape: \" + str(X_train.shape))\nprint (\"Y_train shape: \" + str(Y_train.shape))\nprint (\"X_test shape: \" + str(X_test.shape))\nprint (\"Y_test shape: \" + str(Y_test.shape))",
"number of training examples = 1080\nnumber of test examples = 120\nX_train shape: (12288, 1080)\nY_train shape: (6, 1080)\nX_test shape: (12288, 120)\nY_test shape: (6, 120)\n"
]
],
[
[
"**Note** that 12288 comes from $64 \\times 64 \\times 3$. Each image is square, 64 by 64 pixels, and 3 is for the RGB colors. Please make sure all these shapes make sense to you before continuing.",
"_____no_output_____"
],
[
"**Your goal** is to build an algorithm capable of recognizing a sign with high accuracy. To do so, you are going to build a tensorflow model that is almost the same as one you have previously built in numpy for cat recognition (but now using a softmax output). It is a great occasion to compare your numpy implementation to the tensorflow one. \n\n**The model** is *LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX*. The SIGMOID output layer has been converted to a SOFTMAX. A SOFTMAX layer generalizes SIGMOID to when there are more than two classes. ",
"_____no_output_____"
],
[
"### 2.1 - Create placeholders\n\nYour first task is to create placeholders for `X` and `Y`. This will allow you to later pass your training data in when you run your session. \n\n**Exercise:** Implement the function below to create the placeholders in tensorflow.",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: create_placeholders\n\ndef create_placeholders(n_x, n_y):\n \"\"\"\n Creates the placeholders for the tensorflow session.\n \n Arguments:\n n_x -- scalar, size of an image vector (num_px * num_px = 64 * 64 * 3 = 12288)\n n_y -- scalar, number of classes (from 0 to 5, so -> 6)\n \n Returns:\n X -- placeholder for the data input, of shape [n_x, None] and dtype \"float\"\n Y -- placeholder for the input labels, of shape [n_y, None] and dtype \"float\"\n \n Tips:\n - You will use None because it let's us be flexible on the number of examples you will for the placeholders.\n In fact, the number of examples during test/train is different.\n \"\"\"\n\n ### START CODE HERE ### (approx. 2 lines)\n X = tf.placeholder(tf.float32, [n_x, None], name=\"X\")\n Y = tf.placeholder(tf.float32, [n_y, None], name=\"Y\")\n ### END CODE HERE ###\n \n return X, Y",
"_____no_output_____"
],
[
"X, Y = create_placeholders(12288, 6)\nprint (\"X = \" + str(X))\nprint (\"Y = \" + str(Y))",
"X = Tensor(\"X_4:0\", shape=(12288, ?), dtype=float32)\nY = Tensor(\"Y:0\", shape=(6, ?), dtype=float32)\n"
]
],
[
[
"**Expected Output**: \n\n<table> \n <tr> \n <td>\n **X**\n </td>\n <td>\n Tensor(\"Placeholder_1:0\", shape=(12288, ?), dtype=float32) (not necessarily Placeholder_1)\n </td>\n </tr>\n <tr> \n <td>\n **Y**\n </td>\n <td>\n Tensor(\"Placeholder_2:0\", shape=(10, ?), dtype=float32) (not necessarily Placeholder_2)\n </td>\n </tr>\n\n</table>",
"_____no_output_____"
],
[
"### 2.2 - Initializing the parameters\n\nYour second task is to initialize the parameters in tensorflow.\n\n**Exercise:** Implement the function below to initialize the parameters in tensorflow. You are going use Xavier Initialization for weights and Zero Initialization for biases. The shapes are given below. As an example, to help you, for W1 and b1 you could use: \n\n```python\nW1 = tf.get_variable(\"W1\", [25,12288], initializer = tf.contrib.layers.xavier_initializer(seed = 1))\nb1 = tf.get_variable(\"b1\", [25,1], initializer = tf.zeros_initializer())\n```\nPlease use `seed = 1` to make sure your results match ours.",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: initialize_parameters\n\ndef initialize_parameters():\n \"\"\"\n Initializes parameters to build a neural network with tensorflow. The shapes are:\n W1 : [25, 12288]\n b1 : [25, 1]\n W2 : [12, 25]\n b2 : [12, 1]\n W3 : [6, 12]\n b3 : [6, 1]\n \n Returns:\n parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3\n \"\"\"\n \n tf.set_random_seed(1) # so that your \"random\" numbers match ours\n \n ### START CODE HERE ### (approx. 6 lines of code)\n W1 = tf.get_variable(\"W1\", [25, 12288], initializer = tf.contrib.layers.xavier_initializer(seed=1))\n b1 = tf.get_variable(\"b1\", [25, 1], initializer = tf.zeros_initializer())\n W2 = tf.get_variable(\"W2\", [12, 25], initializer = tf.contrib.layers.xavier_initializer(seed=1))\n b2 = tf.get_variable(\"b2\", [12, 1], initializer = tf.zeros_initializer())\n W3 = tf.get_variable(\"W3\", [6, 12], initializer = tf.contrib.layers.xavier_initializer(seed=1))\n b3 = tf.get_variable(\"b3\", [6, 1], initializer = tf.zeros_initializer())\n ### END CODE HERE ###\n\n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2,\n \"W3\": W3,\n \"b3\": b3}\n \n return parameters",
"_____no_output_____"
],
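[
"# Side note (illustrative, not part of the exercise): Xavier/Glorot initialization\n# keeps the weight variance near 2 / (fan_in + fan_out) so signal magnitudes stay\n# stable across layers. For W1 (25 x 12288) that target variance would be:\nprint(2.0 / (12288 + 25))",
"_____no_output_____"
],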
[
"tf.reset_default_graph()\nwith tf.Session() as sess:\n parameters = initialize_parameters()\n print(\"W1 = \" + str(parameters[\"W1\"]))\n print(\"b1 = \" + str(parameters[\"b1\"]))\n print(\"W2 = \" + str(parameters[\"W2\"]))\n print(\"b2 = \" + str(parameters[\"b2\"]))",
"W1 = <tf.Variable 'W1:0' shape=(25, 12288) dtype=float32_ref>\nb1 = <tf.Variable 'b1:0' shape=(25, 1) dtype=float32_ref>\nW2 = <tf.Variable 'W2:0' shape=(12, 25) dtype=float32_ref>\nb2 = <tf.Variable 'b2:0' shape=(12, 1) dtype=float32_ref>\n"
]
],
[
[
"**Expected Output**: \n\n<table> \n <tr> \n <td>\n **W1**\n </td>\n <td>\n < tf.Variable 'W1:0' shape=(25, 12288) dtype=float32_ref >\n </td>\n </tr>\n <tr> \n <td>\n **b1**\n </td>\n <td>\n < tf.Variable 'b1:0' shape=(25, 1) dtype=float32_ref >\n </td>\n </tr>\n <tr> \n <td>\n **W2**\n </td>\n <td>\n < tf.Variable 'W2:0' shape=(12, 25) dtype=float32_ref >\n </td>\n </tr>\n <tr> \n <td>\n **b2**\n </td>\n <td>\n < tf.Variable 'b2:0' shape=(12, 1) dtype=float32_ref >\n </td>\n </tr>\n\n</table>",
"_____no_output_____"
],
[
"As expected, the parameters haven't been evaluated yet.",
"_____no_output_____"
],
[
"### 2.3 - Forward propagation in tensorflow \n\nYou will now implement the forward propagation module in tensorflow. The function will take in a dictionary of parameters and it will complete the forward pass. The functions you will be using are: \n\n- `tf.add(...,...)` to do an addition\n- `tf.matmul(...,...)` to do a matrix multiplication\n- `tf.nn.relu(...)` to apply the ReLU activation\n\n**Question:** Implement the forward pass of the neural network. We commented for you the numpy equivalents so that you can compare the tensorflow implementation to numpy. It is important to note that the forward propagation stops at `z3`. The reason is that in tensorflow the last linear layer output is given as input to the function computing the loss. Therefore, you don't need `a3`!\n\n",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: forward_propagation\n\ndef forward_propagation(X, parameters):\n \"\"\"\n Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX\n \n Arguments:\n X -- input dataset placeholder, of shape (input size, number of examples)\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\"\n the shapes are given in initialize_parameters\n\n Returns:\n Z3 -- the output of the last LINEAR unit\n \"\"\"\n \n # Retrieve the parameters from the dictionary \"parameters\" \n W1 = parameters['W1']\n b1 = parameters['b1']\n W2 = parameters['W2']\n b2 = parameters['b2']\n W3 = parameters['W3']\n b3 = parameters['b3']\n \n ### START CODE HERE ### (approx. 5 lines) # Numpy Equivalents:\n Z1 = tf.add(tf.matmul(W1, X), b1) # Z1 = np.dot(W1, X) + b1\n A1 = tf.nn.relu(Z1) # A1 = relu(Z1)\n Z2 = tf.add(tf.matmul(W2, A1), b2) # Z2 = np.dot(W2, a1) + b2\n A2 = tf.nn.relu(Z2) # A2 = relu(Z2)\n Z3 = tf.add(tf.matmul(W3, A2), b3) # Z3 = np.dot(W3,Z2) + b3\n ### END CODE HERE ###\n \n return Z3",
"_____no_output_____"
],
[
"tf.reset_default_graph()\n\nwith tf.Session() as sess:\n X, Y = create_placeholders(12288, 6)\n parameters = initialize_parameters()\n Z3 = forward_propagation(X, parameters)\n print(\"Z3 = \" + str(Z3))",
"Z3 = Tensor(\"Add_2:0\", shape=(6, ?), dtype=float32)\n"
]
],
[
[
"**Expected Output**: \n\n<table> \n <tr> \n <td>\n **Z3**\n </td>\n <td>\n Tensor(\"Add_2:0\", shape=(6, ?), dtype=float32)\n </td>\n </tr>\n\n</table>",
"_____no_output_____"
],
[
"You may have noticed that the forward propagation doesn't output any cache. You will understand why below, when we get to brackpropagation.",
"_____no_output_____"
],
[
"### 2.4 Compute cost\n\nAs seen before, it is very easy to compute the cost using:\n```python\ntf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = ..., labels = ...))\n```\n**Question**: Implement the cost function below. \n- It is important to know that the \"`logits`\" and \"`labels`\" inputs of `tf.nn.softmax_cross_entropy_with_logits` are expected to be of shape (number of examples, num_classes). We have thus transposed Z3 and Y for you.\n- Besides, `tf.reduce_mean` basically does the summation over the examples.",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: compute_cost \n\ndef compute_cost(Z3, Y):\n \"\"\"\n Computes the cost\n \n Arguments:\n Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (6, number of examples)\n Y -- \"true\" labels vector placeholder, same shape as Z3\n \n Returns:\n cost - Tensor of the cost function\n \"\"\"\n \n # to fit the tensorflow requirement for tf.nn.softmax_cross_entropy_with_logits(...,...)\n logits = tf.transpose(Z3)\n labels = tf.transpose(Y)\n \n ### START CODE HERE ### (1 line of code)\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))\n ### END CODE HERE ###\n \n return cost",
"_____no_output_____"
],
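[
"# Cross-check (illustrative, assumed toy values): the cost above equals the mean\n# over examples of -sum_k y_k * log(softmax(z)_k), computed here in plain numpy.\nz = np.array([[2.0, 1.0, 0.1]]) # logits for one example, 3 classes\ny_true = np.array([[1.0, 0.0, 0.0]])\nsoftmax = np.exp(z) / np.sum(np.exp(z), axis=1, keepdims=True)\nprint(-np.sum(y_true * np.log(softmax), axis=1).mean())",
"_____no_output_____"
],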
[
"tf.reset_default_graph()\n\nwith tf.Session() as sess:\n X, Y = create_placeholders(12288, 6)\n parameters = initialize_parameters()\n Z3 = forward_propagation(X, parameters)\n cost = compute_cost(Z3, Y)\n print(\"cost = \" + str(cost))",
"cost = Tensor(\"Mean:0\", shape=(), dtype=float32)\n"
]
],
[
[
"**Expected Output**: \n\n<table> \n <tr> \n <td>\n **cost**\n </td>\n <td>\n Tensor(\"Mean:0\", shape=(), dtype=float32)\n </td>\n </tr>\n\n</table>",
"_____no_output_____"
],
[
"### 2.5 - Backward propagation & parameter updates\n\nThis is where you become grateful to programming frameworks. All the backpropagation and the parameters update is taken care of in 1 line of code. It is very easy to incorporate this line in the model.\n\nAfter you compute the cost function. You will create an \"`optimizer`\" object. You have to call this object along with the cost when running the tf.session. When called, it will perform an optimization on the given cost with the chosen method and learning rate.\n\nFor instance, for gradient descent the optimizer would be:\n```python\noptimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(cost)\n```\n\nTo make the optimization you would do:\n```python\n_ , c = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})\n```\n\nThis computes the backpropagation by passing through the tensorflow graph in the reverse order. From cost to inputs.\n\n**Note** When coding, we often use `_` as a \"throwaway\" variable to store values that we won't need to use later. Here, `_` takes on the evaluated value of `optimizer`, which we don't need (and `c` takes the value of the `cost` variable). ",
"_____no_output_____"
],
[
"### 2.6 - Building the model\n\nNow, you will bring it all together! \n\n**Exercise:** Implement the model. You will be calling the functions you had previously implemented.",
"_____no_output_____"
]
],
[
[
"def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001,\n num_epochs = 1500, minibatch_size = 32, print_cost = True):\n \"\"\"\n Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.\n \n Arguments:\n X_train -- training set, of shape (input size = 12288, number of training examples = 1080)\n Y_train -- test set, of shape (output size = 6, number of training examples = 1080)\n X_test -- training set, of shape (input size = 12288, number of training examples = 120)\n Y_test -- test set, of shape (output size = 6, number of test examples = 120)\n learning_rate -- learning rate of the optimization\n num_epochs -- number of epochs of the optimization loop\n minibatch_size -- size of a minibatch\n print_cost -- True to print the cost every 100 epochs\n \n Returns:\n parameters -- parameters learnt by the model. They can then be used to predict.\n \"\"\"\n \n ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables\n tf.set_random_seed(1) # to keep consistent results\n seed = 3 # to keep consistent results\n (n_x, m) = X_train.shape # (n_x: input size, m : number of examples in the train set)\n n_y = Y_train.shape[0] # n_y : output size\n costs = [] # To keep track of the cost\n \n # Create Placeholders of shape (n_x, n_y)\n ### START CODE HERE ### (1 line)\n X, Y = create_placeholders(n_x, n_y)\n ### END CODE HERE ###\n\n # Initialize parameters\n ### START CODE HERE ### (1 line)\n parameters = initialize_parameters()\n ### END CODE HERE ###\n \n # Forward propagation: Build the forward propagation in the tensorflow graph\n ### START CODE HERE ### (1 line)\n Z3 = forward_propagation(X, parameters)\n ### END CODE HERE ###\n \n # Cost function: Add cost function to tensorflow graph\n ### START CODE HERE ### (1 line)\n cost = compute_cost(Z3, Y)\n ### END CODE HERE ###\n \n # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.\n ### START CODE HERE ### (1 line)\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n ### END CODE HERE ###\n \n # Initialize all the variables\n init = tf.global_variables_initializer()\n\n # Start the session to compute the tensorflow graph\n with tf.Session() as sess:\n \n # Run the initialization\n sess.run(init)\n \n # Do the training loop\n for epoch in range(num_epochs):\n\n epoch_cost = 0. 
# Defines a cost related to an epoch\n num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set\n seed = seed + 1\n minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)\n\n for minibatch in minibatches:\n\n # Select a minibatch\n (minibatch_X, minibatch_Y) = minibatch\n \n # IMPORTANT: The line that runs the graph on a minibatch.\n # Run the session to execute the \"optimizer\" and the \"cost\", the feedict should contain a minibatch for (X,Y).\n ### START CODE HERE ### (1 line)\n _ , minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})\n ### END CODE HERE ###\n \n epoch_cost += minibatch_cost / num_minibatches\n\n # Print the cost every epoch\n if print_cost == True and epoch % 100 == 0:\n print (\"Cost after epoch %i: %f\" % (epoch, epoch_cost))\n if print_cost == True and epoch % 5 == 0:\n costs.append(epoch_cost)\n \n # plot the cost\n plt.plot(np.squeeze(costs))\n plt.ylabel('cost')\n plt.xlabel('iterations (per tens)')\n plt.title(\"Learning rate =\" + str(learning_rate))\n plt.show()\n\n # lets save the parameters in a variable\n parameters = sess.run(parameters)\n print (\"Parameters have been trained!\")\n\n # Calculate the correct predictions\n correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))\n\n # Calculate accuracy on the test set\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n\n print (\"Train Accuracy:\", accuracy.eval({X: X_train, Y: Y_train}))\n print (\"Test Accuracy:\", accuracy.eval({X: X_test, Y: Y_test}))\n \n return parameters",
"_____no_output_____"
]
],
[
[
"Run the following cell to train your model! On our machine it takes about 5 minutes. Your \"Cost after epoch 100\" should be 1.016458. If it's not, don't waste time; interrupt the training by clicking on the square (⬛) in the upper bar of the notebook, and try to correct your code. If it is the correct cost, take a break and come back in 5 minutes!",
"_____no_output_____"
]
],
[
[
"parameters = model(X_train, Y_train, X_test, Y_test)",
"Cost after epoch 0: 1.855702\nCost after epoch 100: 1.016458\nCost after epoch 200: 0.733102\nCost after epoch 300: 0.572940\nCost after epoch 400: 0.468774\nCost after epoch 500: 0.381021\nCost after epoch 600: 0.313822\nCost after epoch 700: 0.254158\nCost after epoch 800: 0.203829\nCost after epoch 900: 0.166421\nCost after epoch 1000: 0.141486\nCost after epoch 1100: 0.107580\nCost after epoch 1200: 0.086270\nCost after epoch 1300: 0.059371\nCost after epoch 1400: 0.052228\n"
]
],
[
[
"**Expected Output**:\n\n<table> \n <tr> \n <td>\n **Train Accuracy**\n </td>\n <td>\n 0.999074\n </td>\n </tr>\n <tr> \n <td>\n **Test Accuracy**\n </td>\n <td>\n 0.716667\n </td>\n </tr>\n\n</table>\n\nAmazing, your algorithm can recognize a sign representing a figure between 0 and 5 with 71.7% accuracy.\n\n**Insights**:\n- Your model seems big enough to fit the training set well. However, given the difference between train and test accuracy, you could try to add L2 or dropout regularization to reduce overfitting. \n- Think about the session as a block of code to train the model. Each time you run the session on a minibatch, it trains the parameters. In total you have run the session a large number of times (1500 epochs) until you obtained well trained parameters.",
"_____no_output_____"
],
[
"### 2.7 - Test with your own image (optional / ungraded exercise)\n\nCongratulations on finishing this assignment. You can now take a picture of your hand and see the output of your model. To do that:\n 1. Click on \"File\" in the upper bar of this notebook, then click \"Open\" to go on your Coursera Hub.\n 2. Add your image to this Jupyter Notebook's directory, in the \"images\" folder\n 3. Write your image's name in the following code\n 4. Run the code and check if the algorithm is right!",
"_____no_output_____"
]
],
[
[
"import scipy\nfrom PIL import Image\nfrom scipy import ndimage\n\n## START CODE HERE ## (PUT YOUR IMAGE NAME) \nmy_image = \"thumbs_up.jpg\"\n## END CODE HERE ##\n\n# We preprocess your image to fit your algorithm.\nfname = \"images/\" + my_image\nimage = np.array(ndimage.imread(fname, flatten=False))\nmy_image = scipy.misc.imresize(image, size=(64,64)).reshape((1, 64*64*3)).T\nmy_image_prediction = predict(my_image, parameters)\n\nplt.imshow(image)\nprint(\"Your algorithm predicts: y = \" + str(np.squeeze(my_image_prediction)))",
"Your algorithm predicts: y = 3\n"
]
],
[
[
"You indeed deserved a \"thumbs-up\" although as you can see the algorithm seems to classify it incorrectly. The reason is that the training set doesn't contain any \"thumbs-up\", so the model doesn't know how to deal with it! We call that a \"mismatched data distribution\" and it is one of the various of the next course on \"Structuring Machine Learning Projects\".",
"_____no_output_____"
],
[
"<font color='blue'>\n**What you should remember**:\n- Tensorflow is a programming framework used in deep learning\n- The two main object classes in tensorflow are Tensors and Operators. \n- When you code in tensorflow you have to take the following steps:\n - Create a graph containing Tensors (Variables, Placeholders ...) and Operations (tf.matmul, tf.add, ...)\n - Create a session\n - Initialize the session\n - Run the session to execute the graph\n- You can execute the graph multiple times as you've seen in model()\n- The backpropagation and optimization is automatically done when running the session on the \"optimizer\" object.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4a1aa582b136c14a696669bbcc653ce418cc907a
| 95,118 |
ipynb
|
Jupyter Notebook
|
analysis/ifn_hbec/version2/figure_4/1d_stim_vs_ctrl.ipynb
|
yelabucsf/scrna-parameter-estimation
|
218ef38b87f8d777d5abcb04913212cbcb21ecb1
|
[
"MIT"
] | 2 |
2021-03-17T20:31:54.000Z
|
2022-03-17T19:24:37.000Z
|
analysis/ifn_hbec/version2/figure_4/1d_stim_vs_ctrl.ipynb
|
yelabucsf/scrna-parameter-estimation
|
218ef38b87f8d777d5abcb04913212cbcb21ecb1
|
[
"MIT"
] | 1 |
2021-08-23T20:55:07.000Z
|
2021-08-23T20:55:07.000Z
|
analysis/ifn_hbec/version2/figure_4/1d_stim_vs_ctrl.ipynb
|
yelabucsf/scrna-parameter-estimation
|
218ef38b87f8d777d5abcb04913212cbcb21ecb1
|
[
"MIT"
] | 1 |
2020-04-06T05:43:31.000Z
|
2020-04-06T05:43:31.000Z
| 48.703533 | 261 | 0.578303 |
[
[
[
"# 1D Variability hypothesis testing for HBEC IFN experiment",
"_____no_output_____"
]
],
[
[
"import scanpy as sc\nimport seaborn as sns\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.stats as stats\nfrom pybedtools import BedTool\nimport pickle as pkl\n%matplotlib inline",
"_____no_output_____"
],
[
"import sys\nsys.path.append('/home/ssm-user/Github/scrna-parameter-estimation/dist/memento-0.0.6-py3.8.egg')\nsys.path.append('/home/ssm-user/Github/misc-seq/miscseq/')\nimport encode\nimport memento",
"_____no_output_____"
],
[
"data_path = '/data_volume/memento/hbec/'",
"_____no_output_____"
]
],
[
[
"### Read the processed RNA data\n\nFocus on the club and bc/club cells and type I interferons for now.\n\nEncode the timestamps to integers.",
"_____no_output_____"
]
],
[
[
"adata_processed = sc.read(data_path + 'HBEC_type_I_processed.h5ad')",
"_____no_output_____"
]
],
[
[
"adata = sc.read(data_path + 'HBEC_type_I_filtered_counts_deep.h5ad')",
"_____no_output_____"
],
[
"adata = adata[:, ~adata.var.index.str.startswith('MT-')].copy()\n# adata.obs['cell_type'] = adata.obs['cell_type'].apply(lambda x: x if x != 'basal/club' else 'bc')\n# adata.obs['cell_type'] = adata.obs['cell_type'].apply(lambda x: x if x != 'ionocyte/tuft' else 'ion-tuft')",
"/home/ssm-user/anaconda3/envs/single_cell/lib/python3.8/site-packages/pandas/core/arrays/categorical.py:2487: FutureWarning: The `inplace` parameter in pandas.Categorical.remove_unused_categories is deprecated and will be removed in a future version.\n res = method(*args, **kwargs)\n"
]
],
[
[
"sc.pl.umap(adata_processed, color=['cell_type', 'time', 'stim'])",
"_____no_output_____"
]
],
[
[
"converter = {'basal/club':'BC', 'basal':'B', 'ciliated':'C', 'goblet':'G', 'ionocyte/tuft':'IT', 'neuroendo':'N'}",
"_____no_output_____"
],
[
"adata.obs['ct'] = adata.obs['cell_type'].apply(lambda x: converter[x])",
"_____no_output_____"
]
],
[
[
"### Setup memento",
"_____no_output_____"
]
],
[
[
"def assign_q(batch):\n \n if batch == 0:\n return 0.387*0.25\n elif batch == 1:\n return 0.392*0.25\n elif batch == 2:\n return 0.436*0.25\n else:\n return 0.417*0.25",
"_____no_output_____"
],
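[
"# Equivalent lookup written as a dict (illustrative alternative, same values);\n# batches other than 0-2 fall back to the final value, matching the else branch above.\nq_by_batch = {0: 0.387*0.25, 1: 0.392*0.25, 2: 0.436*0.25}\n# adata.obs['q'] = adata.obs['batch'].apply(lambda b: q_by_batch.get(b, 0.417*0.25))",
"_____no_output_____"
],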
[
"adata.obs['q'] = adata.obs['batch'].apply(assign_q)",
"_____no_output_____"
],
[
"memento.setup_memento(adata, q_column='q')",
"Version 0.0.6\n"
]
],
[
[
"### Run memento for each subset, comparing to control",
"_____no_output_____"
]
],
[
[
"cts = ['C', 'B', 'BC']\ntps = ['3', '6', '9', '24', '48']\n\nstims = ['alpha', 'beta', 'gamma', 'lambda']",
"_____no_output_____"
],
[
"import os\ndone_files = os.listdir(data_path + 'binary_test_latest/')",
"_____no_output_____"
],
[
"for ct in cts:\n for tp in tps:\n for stim in stims:\n \n fname = '{}_{}_{}.h5ad'.format('-'.join(ct), stim, tp)\n \n if fname in done_files:\n print('Skipping', fname)\n continue\n\n print('starting', ct, tp, stim)\n\n adata_stim = adata.copy()[\n adata.obs.ct.isin([ct]) & \\\n adata.obs.stim.isin(['control', stim]) & \\\n adata.obs.time.isin(['0',tp]), :].copy()\n time_converter={0:0, int(tp):1}\n adata_stim.obs['time_step'] = adata_stim.obs['time'].astype(int).apply(lambda x: time_converter[x])\n\n memento.create_groups(adata_stim, label_columns=['time_step', 'donor'])\n memento.compute_1d_moments(adata_stim, min_perc_group=.9)\n\n memento.ht_1d_moments(\n adata_stim, \n formula_like='1 + time_step + donor',\n treatment_col='time_step', \n num_boot=10000, \n verbose=1,\n num_cpus=93,\n resampling='permutation',\n approx=True)\n\n adata_stim.write(data_path + 'binary_test_latest/{}_{}_{}.h5ad'.format(ct, stim, tp))",
"starting C 3 alpha\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"raw",
"code",
"raw",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"raw"
],
[
"code",
"code"
],
[
"raw"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a1abbf022dedd11ccf7747c9f9962690ad29732
| 53,842 |
ipynb
|
Jupyter Notebook
|
HW6/.ipynb_checkpoints/HW6_final-checkpoint.ipynb
|
filip-michalsky/CS207_Systems_Development
|
4790c3101e3037d7741565198e814637e34eaff9
|
[
"MIT"
] | null | null | null |
HW6/.ipynb_checkpoints/HW6_final-checkpoint.ipynb
|
filip-michalsky/CS207_Systems_Development
|
4790c3101e3037d7741565198e814637e34eaff9
|
[
"MIT"
] | null | null | null |
HW6/.ipynb_checkpoints/HW6_final-checkpoint.ipynb
|
filip-michalsky/CS207_Systems_Development
|
4790c3101e3037d7741565198e814637e34eaff9
|
[
"MIT"
] | null | null | null | 38.077793 | 1,247 | 0.541306 |
[
[
[
"# Homework 6\n## Due: Tuesday, October 10 at 11:59 PM",
"_____no_output_____"
],
[
"# Problem 1: Bank Account Revisited\n\nWe are going to rewrite the bank account closure problem we had a few assignments ago, only this time developing a formal class for a Bank User and Bank Account to use in our closure (recall previously we just had a nonlocal variable amount that we changed). ",
"_____no_output_____"
],
[
"### Some Preliminaries:\nFirst we are going to define two types of bank accounts. Use the code below to do this:",
"_____no_output_____"
]
],
[
[
"from enum import Enum\nclass AccountType(Enum):\n SAVINGS = 1\n CHECKING = 2",
"_____no_output_____"
]
],
[
[
"An Enum stands for an enumeration, it's a convenient way for you to define lists of things. Typing:",
"_____no_output_____"
]
],
[
[
"AccountType.SAVINGS",
"_____no_output_____"
]
],
[
[
"returns a Python representation of an enumeration. You can compare these account types:",
"_____no_output_____"
]
],
[
[
"AccountType.SAVINGS == AccountType.SAVINGS",
"_____no_output_____"
],
[
"AccountType.SAVINGS == AccountType.CHECKING",
"_____no_output_____"
]
],
[
[
"To get a string representation of an Enum, you can use:",
"_____no_output_____"
]
],
[
[
"AccountType.SAVINGS.name",
"_____no_output_____"
],
[
"raise ValueError(\"ba\")",
"_____no_output_____"
]
],
[
[
"### Part 1: Create a BankAccount class with the following specification:\n\nConstructor is `BankAccount(self, owner, accountType)` where `owner` is a string representing the name of the account owner and `accountType` is one of the AccountType enums\n\nMethods `withdraw(self, amount)` and `deposit(self, amount)` to modify the account balance of the account\n\nOverride methods `__str__` to write an informative string of the account owner and the type of account, and `__len__` to return the balance of the account",
"_____no_output_____"
]
],
[
[
"class BankAccount():\n \"Class Bank Account specifying owner and Account type\"\n def __init__(self, owner, accountType):\n self.owner = owner\n self.balance = 0\n self.AccountType = accountType\n \n def withdraw(self,amount):\n if amount <= 0:\n raise ValueError(\"Cannot withdraw a negative value\")\n if amount > self.balance:\n raise ValueError(\"Insufficient funds\")\n \n self.balance = self.balance - amount\n print(\"Withdrawal of \",amount,\" from account \",self.AccountType.name,\". Remaining balance \", self.balance)\n \n def deposit(self,deposit):\n if deposit <=0:\n raise ValueError\n else:\n self.balance +=deposit\n print(\"Deposit of \", deposit, \" and new balance is \", self.balance)\n \n def __str__(self):\n \n return \"Owner of account \"+ self.owner + \" and account type is \" + self.AccountType.name\n \n def __len__(self):\n #return the balance of acc\n return self.balance\n \nBankacc1 = BankAccount(\"Filip M\", AccountType.SAVINGS) \nprint(Bankacc1)\n\nBankacc1.deposit(100)\nprint(len(Bankacc1))\n \n \n \n ",
"Owner of account Filip M and account type is SAVINGS\nDeposit of 100 and new balance is 100\n100\n"
]
],
[
[
"### Part 2: Write a class BankUser with the following specification:\n\nConstructor `BankUser(self, owner)` where `owner` is the name of the account.\n\nMethod `addAccount(self, accountType)` - to start, a user will have no accounts when the BankUser object is created. `addAccount` will add a new account to the user of the `accountType` specified. **Only one savings/checking account per user, return appropriate error otherwise**\n\nMethods `getBalance(self, accountType)`, `deposit(self, accountType, amount)`, and `withdraw(self, accountType, amount)` for a specific AccountType.\n\nOverride `__str__` to have an informative summary of user's accounts.",
"_____no_output_____"
]
],
[
[
"class BankUser():\n \n def __init__(self,owner):\n self.owner = owner\n self.savings = None\n self.checking = None\n self.myCaccount = None\n self.mySaccount = None\n \n def addAccount(self, accountType):\n #Check what account type and then whether the account is there\n if accountType == AccountType.SAVINGS:\n if self.savings == None:\n self.savings = 1\n self.mySaccount = BankAccount(self.owner,accountType)\n \n else:\n raise ValueError(\"You already have a savings account\")\n \n elif accountType == AccountType.CHECKING:\n if self.checking == None:\n self.savings = 1\n self.myCaccount = BankAccount(self.owner,accountType)\n \n else:\n raise ValueError(\"You already have a checking account\")\n else:\n raise ValueError(\"Account type is not valid\")\n \n def getBalance(self,accountType):\n if accountType == AccountType.SAVINGS:\n if self.mySaccount == None:\n raise ValueError(\"No savings account\")\n else:\n return self.mySaccount.balance\n \n elif accountType == AccountType.CHECKING:\n if self.myCaccount == None:\n raise ValueError(\"No checking account\")\n else:\n return self.myCaccount.balance\n else:\n raise ValueError(\"Invalid account type inserted\")\n \n def deposit(self,accountType,amount):\n if accountType == AccountType.SAVINGS:\n if self.mySaccount == None:\n raise ValueError(\"No savings account\")\n else:\n return self.mySaccount.deposit(amount)\n \n elif accountType == AccountType.CHECKING:\n if self.myCaccount == None:\n raise ValueError(\"No checking account\")\n else:\n return self.myCaccount.deposit(amount)\n \n else:\n raise ValueError(\"Invalid account type inserted\")\n \n def withdraw(self,accountType,amount):\n #analogous to deposit\n if accountType == AccountType.SAVINGS:\n if self.mySaccount == None:\n raise ValueError(\"No savings account\")\n else:\n return self.mySaccount.withdraw(amount)\n \n elif accountType == AccountType.CHECKING:\n if self.myCaccount == None:\n raise ValueError(\"No checking account\")\n else:\n return self.myCaccount.withdraw(amount)\n \n else:\n raise ValueError(\"Invalid account type inserted\")\n ",
"_____no_output_____"
],
[
"user1 = BankUser(\"Filip\")\nuser1.addAccount(AccountType.CHECKING)\nuser1.getBalance(AccountType.CHECKING)\nuser1.deposit(AccountType.CHECKING,7500)\nuser1.withdraw(AccountType.CHECKING,-2500)",
"Deposit of 7500 and new balance is 7500\n"
]
],
[
[
"Write some simple tests to make sure this is working. Think of edge scenarios a user might try to do.",
"_____no_output_____"
]
],
[
[
"def test_withdraw_negative():\n try:\n user1 = BankUser(\"Filip\")\n user1.addAccount(AccountType.CHECKING)\n user1.withdraw(AccountType.CHECKING,-2500)\n except ValueError as Valer:\n assert(type(Valer)==ValueError)",
"_____no_output_____"
],
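[
"# One more edge case in the same style: depositing a negative amount should also raise.\ndef test_deposit_negative():\n try:\n user2 = BankUser(\"Filip\")\n user2.addAccount(AccountType.SAVINGS)\n user2.deposit(AccountType.SAVINGS, -100)\n except ValueError as Valer:\n assert(type(Valer) == ValueError)\n\ntest_deposit_negative()",
"_____no_output_____"
],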
[
"test_withdraw_negative()",
"_____no_output_____"
]
],
[
[
"### Part 3: ATM Closure\n\nFinally, we are going to rewrite a closure to use our bank account. We will make use of the [input function](http://anh.cs.luc.edu/python/hands-on/3.1/handsonHtml/io.html) which takes user input to decide what actions to take.\n\nWrite a closure called ATMSession(bankUser) which takes in a BankUser object. Return a method called Interface that when called, would provide the following interface:\n\nFirst screen for user will look like:\n\n\n**Enter Option:**\n\n**1)Exit**\n\n**2)Create Account**\n\n**3)Check Balance**\n\n**4)Deposit**\n\n**5)Withdraw**\n\n\nPressing 1 will exit, any other option will show the options:\n\n\n\n**Enter Option:**\n\n**1)Checking**\n\n**2)Savings**\n\n\nIf a deposit or withdraw was chosen, then there must be a third screen:\n\n\n\n**Enter Integer Amount, Cannot Be Negative:**\n\nThis is to keep the code relatively simple, if you'd like you can also curate the options depending on the BankUser object (for example, if user has no accounts then only show the Create Account option), but this is up to you. In any case, you must handle any input from the user in a reasonable way that an actual bank would be okay with, and give the user a proper response to the action specified.\n\nUpon finishing a transaction or viewing balance, it should go back to the original screen",
"_____no_output_____"
]
],
[
[
"def ATMSession(bankUser):\n def interface():\n while True:\n ",
"_____no_output_____"
]
],
[
[
"### Part 4: Put everything in a module Bank.py\n\nWe will be grading this problem with a test suite. Put the enum, classes, and closure in a single file named Bank.py. It is very important that the class and method specifications we provided are used (with the same capitalization), otherwise you will receive no credit.",
"_____no_output_____"
],
[
"---",
"_____no_output_____"
],
[
"## Problem 2: Linear Regression Class\n\nLet's say you want to create Python classes for three related types of linear regression: Ordinary Least Squares Linear Regression, Ridge Regression, and Lasso Regression. ",
"_____no_output_____"
],
[
"Consider the multivariate linear model:\n\n$$y = X\\beta + \\epsilon$$\n\nwhere $y$ is a length $n$ vector, $X$ is an $m \\times p$ matrix, and $\\beta$\nis a $p$ length vector of coefficients.\n\n#### Ordinary Least Squares Linear Regression\n\n[OLS Regression](https://en.wikipedia.org/wiki/Ordinary_least_squares) seeks to minimize the following cost function:\n\n$$\\|y - \\beta\\mathbf {X}\\|^{2}$$\n\nThe best fit coefficients can be obtained by:\n\n$$\\hat{\\beta} = (X^T X)^{-1}X^Ty$$\n\nwhere $X^T$ is the transpose of the matrix $X$ and $X^{-1}$ is the inverse of the matrix $X$.\n\n#### Ridge Regression\n\n[Ridge Regression](https://en.wikipedia.org/wiki/Tikhonov_regularization) introduces an L2 regularization term to the cost function:\n\n$$\\|y - \\beta\\mathbf {X}\\|^{2}+\\|\\Gamma \\mathbf {x} \\|^{2}$$\n\nWhere $\\Gamma = \\alpha I$ for some constant $\\alpha$ and the identity matrix $I$.\n\nThe best fit coefficients can be obtained by:\n$$\\hat{\\beta} = (X^T X+\\Gamma^T\\Gamma)^{-1}X^Ty$$\n\n#### Lasso Regression\n\n[Lasso Regression](https://en.wikipedia.org/wiki/Lasso_%28statistics%29) introduces an L1 regularization term and restricts the total number of predictor variables in the model.\nThe following cost function:\n$${\\displaystyle \\min _{\\beta _{0},\\beta }\\left\\{{\\frac {1}{m}}\\left\\|y-\\beta _{0}-X\\beta \\right\\|_{2}^{2}\\right\\}{\\text{ subject to }}\\|\\beta \\|_{1}\\leq \\alpha.}$$\n\ndoes not have a nice closed form solution. For the sake of this exercise, you may use the [sklearn.linear_model.Lasso](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Lasso.html) class, which uses a coordinate descent algorithm to find the best fit. You should only use the class in the fit() method of this exercise (ie. do not re-use the sklearn for other methods in your class).\n\n#### $R^2$ score\n\nThe $R^2$ score is defined as:\n$${R^{2} = {1-{SS_E \\over SS_T}}}$$\n\nWhere:\n\n$$SS_T=\\sum_i (y_i-\\bar{y})^2, SS_R=\\sum_i (\\hat{y_i}-\\bar{y})^2, SS_E=\\sum_i (y_i - \\hat{y_i})^2$$\n\nwhere ${y_i}$ are the original data values, $\\hat{y_i}$ are the predicted values, and $\\bar{y_i}$ is the mean of the original data values.",
"_____no_output_____"
],
[
"### Part 1: Base Class\nWrite a class called `Regression` with the following methods:\n\n$fit(X, y)$: Fits linear model to $X$ and $y$.\n\n$get\\_params()$: Returns $\\hat{\\beta}$ for the fitted model. The parameters should be stored in a dictionary.\n\n$predict(X)$: Predict new values with the fitted model given $X$.\n\n$score(X, y)$: Returns $R^2$ value of the fitted model.\n\n$set\\_params()$: Manually set the parameters of the linear model.\n\nThis parent class should throw a `NotImplementedError` for methods that are intended to be implemented by subclasses.",
"_____no_output_____"
]
],
[
[
"class Regression:\n \n def __init__(self):\n self.X = 0\n self.y = 0\n self.betas = 0\n self.alpha=0.1\n \n def fit(self,X,y):\n #reshape x and y arrays to get the right matrices for regre\n\n raise NotImplementedError(\"Subclasses should implement this!\")\n \n def get_params(self):\n self_betas=self.betas.reshape(-1,1)\n print(self.betas.shape)\n betas_dict={}\n for index,value in enumerate(self.betas):\n betas_dict[index]=value\n return betas_dict\n \n def predict(self,X):\n #print(self.betas.shape)\n #print(sm.add_constant(X).T.shape)\n #print(self.betas)\n #print(sm.add_constant(X).T)\n ypred = np.dot(self.betas,(sm.add_constant(X).T))\n \n #raise NotImplementedError(\"Subclasses should implement this!\")\n return ypred\n \n def score(self,X,y):\n ypred = self.predict(X)\n ypred = ypred.reshape(-1,1)\n #print(ypred)\n y_mean=np.mean(y)\n y=y.reshape(-1,1)\n #print(\"y_mean\")\n #print(y_mean)\n #print(\"y-y_pred\")\n #print((y - ypred)*(y - ypred))\n #print(((y - ypred)).shape)\n #print((y - y_mean)*(y-y_mean))\n #print(np.sum((y - y_mean**2)))\n score = 1- (np.sum((y - ypred)*(y - ypred))/np.sum((y - y_mean)*(y - y_mean)))\n #raise NotImplementedError(\"Subclasses should implement this!\")\n return score\n \n def set_params(self):\n beta0 = float(input(\"Please input zero coefficient\"))\n beta1 = float(input(\"Please input first coefficient\"))\n self.betas = [beta0,beta1]\n return None\n ",
"_____no_output_____"
],
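[
"# Quick numerical check of the closed-form used by OLSRegression below, on a tiny\n# assumed dataset: beta_hat = (X^T X)^{-1} X^T y with an intercept column prepended.\nX_demo = np.array([[1.0, 1.0], [1.0, 2.0], [1.0, 3.0], [1.0, 4.0]])\ny_demo = np.array([2.0, 2.0, 4.0, 5.0])\nbeta_hat = np.linalg.inv(X_demo.T @ X_demo) @ X_demo.T @ y_demo\nprint(beta_hat) # roughly [0.5, 1.1] for this data",
"_____no_output_____"
],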
[
"#import statsmodels.api as sm\n\nx_train = np.array([1, 2, 3, 4])\ny_train = np.array([2, 2, 4, 5])\n#print(y_train.reshape(-1,1))\ntest_regr = Regression(x_train,y_train)\n\n#test_regr.fit(x_train,y_train)\n#test_regr.set_params()\n#test_regr.get_params()\n",
"_____no_output_____"
]
],
[
[
"### Part 2: OLS Linear Regression\n\nWrite a class called `OLSRegression` that implements the OLS Regression model described above and inherits the `Regression` class.",
"_____no_output_____"
]
],
[
[
"class OLSRegression(Regression):\n \n \n def fit(self,X,y):\n #reshape x and y arrays to get the right matrices for regre\n self.X = X\n self.y= y\n #x_train = self.X.reshape(len(self.X),1)\n #y_train = self.y.reshape(len(self.y),1)\n\n #build matrix X by concatenating predictors and a column of ones\n n = x_train.shape[0]\n ones_col = np.ones((n, 1))\n X = np.concatenate((ones_col, x_train), axis=1)\n\n #matrix X^T X\n LHS = np.dot(np.transpose(X), X)\n #print(LHS)\n #matrix X^T Y\n RHS = np.dot(np.transpose(X), y_train)\n\n #solution beta to normal equations, since LHS is invertible by construction\n self.betas = np.dot(np.linalg.inv(LHS), RHS)\n print(self.betas)\n print(self.betas.shape)\n #return None\n\n ",
"_____no_output_____"
],
[
"x_train = np.array([1, 2, 3, 4])\ny_train = np.array([2, 2, 4, 5])\n#print(y_train.reshape(-1,1))\n\n#gamma = 3*np.eye(x_train.shape[0])\n#print(gamma)",
"_____no_output_____"
],
[
"test_regr1 = OLSRegression()",
"_____no_output_____"
],
[
"test_regr1.fit(x_train,y_train)\n#test_regr1.score(x_train,y_train)\n",
"[ 4.29470971e+01 -9.06047314e-02 6.23944402e-02 4.87219012e-02\n 3.02566349e+00 -1.94220512e+01 3.07062003e+00 3.90425727e-04\n -1.59531728e+00 3.32480192e-01 -1.36229857e-02 -9.49914931e-01\n 8.03221946e-03 -5.60407340e-01]\n(14,)\n"
],
[
"test_regr1.predict(x_train)",
"_____no_output_____"
],
[
"test_regr1.score(x_train,y_train)",
"_____no_output_____"
]
],
[
[
"### Part 3: Ridge Regression\n\nWrite a class called `RidgeRegression` that implements Ridge Regression and inherits the `OLSRegression` class.",
"_____no_output_____"
],
[
"$$\\hat{\\beta} = (X^T X+\\Gamma^T\\Gamma)^{-1}X^Ty$$",
"_____no_output_____"
]
],
[
[
"class RidgeRegression(OLSRegression):\n \n def fit(self,X,y,alpha):\n self.X = X\n self.y= y\n #x_train = self.X.reshape(len(self.X),1)\n #y_train = self.y.reshape(len(self.y),1)\n \n \n #build matrix X by concatenating predictors and a column of ones\n n = x_train.shape[0]\n ones_col = np.ones((n, 1))\n X = np.concatenate((ones_col, x_train), axis=1)\n \n \n #matrix X^T X\n LHS = np.dot(np.transpose(X), X)\n #print(LHS)\n gamma = alpha*np.eye(LHS.shape[0])\n #print(gamma)\n gammas = np.dot(np.transpose(gamma), gamma)\n \n \n #matrix X^T Y\n RHS = np.dot(np.transpose(X), y_train)\n\n #solution beta to normal equations, since LHS is invertible by construction\n self.betas = np.dot(np.linalg.inv(LHS+gammas), RHS)\n #print(self.betas)\n print(self.betas)\n print(self.betas.shape)\n return None",
"_____no_output_____"
],
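[
"# Illustrative check: a larger alpha shrinks the ridge solution toward zero\n# (Gamma^T Gamma = alpha^2 * I, so the penalty grows quadratically in alpha).\n# Reuses X_demo / y_demo from the OLS check above.\nfor a in [0.0, 1.0, 10.0]:\n beta_ridge = np.linalg.inv(X_demo.T @ X_demo + (a**2) * np.eye(2)) @ X_demo.T @ y_demo\n print(a, beta_ridge)",
"_____no_output_____"
],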
[
"test_ridge = RidgeRegression()",
"_____no_output_____"
],
[
"test_ridge.fit(x_train,y_train,0.5)",
"[ 2.84218856e+01 -7.63768815e-02 6.31552225e-02 2.68199475e-02\n 3.06341561e+00 -1.20231448e+01 3.85526356e+00 -4.39073826e-03\n -1.37850799e+00 2.71551864e-01 -1.26884998e-02 -7.30987301e-01\n 1.02348576e-02 -5.18831212e-01]\n(14,)\n"
],
[
"test_ridge.predict(x_train)",
"(14,)\n(14, 354)\n"
],
[
"test_ridge.get_params()",
"(14,)\n"
],
[
"test_ridge.score(x_train,y_train)",
"_____no_output_____"
],
[
"test_ridge.get_params()",
"(14,)\n"
]
],
[
[
"### Part 3: Lasso Regression\n\nWrite a class called `LassoRegression` that implements Lasso Regression and inherits the `OLSRegression` class. You should only use Lasso(), Lasso.fit(), Lasso.coef_, and Lasso._intercept from the `sklearn.linear_model.Lasso` class.",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import Lasso",
"_____no_output_____"
],
[
"class LassoRegression(OLSRegression):\n \n def fit(self,X,y):\n #X=X.reshape(len(X),1)\n #y=y.reshape(len(y),1)\n \n model = Lasso(alpha=self.alpha,fit_intercept=True)\n fitted_model= model.fit(X,y)\n intercept = fitted_model.intercept_\n coeffs = np.array(fitted_model.coef_)\n self.betas=np.insert(coeffs,0,intercept)\n #self.betas = self.betas.reshape(-1,1)\n #=np.array([fitted_model.intercept_[0],fitted_model.coef_[i]]).reshape(-1,1)\n print(self.betas)\n ",
"_____no_output_____"
],
[
"print(x_train.shape)\nprint(y_train.shape)",
"(354, 13)\n(354,)\n"
],
[
"lasso_test= LassoRegression()\nlasso_test.fit(x_train,y_train)\n#lasso_test.get_params()",
"[ 3.11520812e+01 -7.39116894e-02 6.56890339e-02 -2.18463416e-02\n 1.34374462e+00 -0.00000000e+00 2.91332913e+00 -9.89484561e-03\n -1.23514400e+00 3.01427281e-01 -1.61487463e-02 -7.46486997e-01\n 8.98172571e-03 -6.12541889e-01]\n"
],
[
"lasso_test.score(x_train,y_train)",
"(14,)\n(14, 354)\n"
]
],
[
[
"### Part 4: Model Scoring\nYou will use the [Boston dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_boston.html) for this part.\n\nInstantiate each of the three models above. Using a for loop, fit (on the training data) and score (on the testing data) each model on the Boston dataset. \n\nPrint out the $R^2$ value for each model and the parameters for the best model using the `get_params()` method. Use an $\\alpha$ value of 0.1.\n\n**Hint:** You can consider using the `sklearn.model_selection.train_test_split` method to create the training and test datasets.",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import load_boston \nfrom sklearn.model_selection import train_test_split\n\nboston = load_boston()\n#print(boston.target)\n#print(boston.data)\n\nx_train, x_test, y_train,y_test = train_test_split(boston.data,boston.target, test_size=0.3)\n\n\nprint(x_train.shape)\n\"\"\"\nmodelOLS = OLSRegression()\nmodelRidge = RidgeRegression()\nmodelLasso = LassoRegression()\n\nmodels = [modelOLS, modelRidge, modelLasso]\n\nfor model in models:\n model.fit(x_train,y_train)\n model.score\n\"\"\"\n",
"(354, 13)\n"
]
],
[
[
"### Part 5: Visualize Model Performance\n\nWe can evaluate how the models perform for various values of $\\alpha$. Calculate the $R^2$ scores for each model for $\\alpha \\in [0.05, 1]$ and plot the three lines on the same graph. To change the parameters, use the `set_params()` method. Be sure to label each line and add axis labels.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |