column                                      dtype           min      max
hexsha                                      stringlengths   40       40
size                                        int64           6        14.9M
ext                                         stringclasses   1 value
lang                                        stringclasses   1 value
max_stars_repo_path                         stringlengths   6        260
max_stars_repo_name                         stringlengths   6        119
max_stars_repo_head_hexsha                  stringlengths   40       41
max_stars_repo_licenses                     list
max_stars_count                             int64           1        191k
max_stars_repo_stars_event_min_datetime     stringlengths   24       24
max_stars_repo_stars_event_max_datetime     stringlengths   24       24
max_issues_repo_path                        stringlengths   6        260
max_issues_repo_name                        stringlengths   6        119
max_issues_repo_head_hexsha                 stringlengths   40       41
max_issues_repo_licenses                    list
max_issues_count                            int64           1        67k
max_issues_repo_issues_event_min_datetime   stringlengths   24       24
max_issues_repo_issues_event_max_datetime   stringlengths   24       24
max_forks_repo_path                         stringlengths   6        260
max_forks_repo_name                         stringlengths   6        119
max_forks_repo_head_hexsha                  stringlengths   40       41
max_forks_repo_licenses                     list
max_forks_count                             int64           1        105k
max_forks_repo_forks_event_min_datetime     stringlengths   24       24
max_forks_repo_forks_event_max_datetime     stringlengths   24       24
avg_line_length                             float64         2        1.04M
max_line_length                             int64           2        11.2M
alphanum_fraction                           float64         0        1
cells                                       list
cell_types                                  list
cell_type_groups                            list
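For orientation, here is a minimal sketch of handling records with this schema, assuming pandas is available; the row values are copied from the first record below, and the filter thresholds are illustrative only:

```python
import pandas as pd

# One row of the schema above (values taken from the first record below;
# the remaining columns behave the same way and are omitted for brevity).
df = pd.DataFrame([{
    "hexsha": "4a79338f46f7b04145faaaf17829305eb82355b1",
    "size": 6992,
    "ext": "ipynb",
    "lang": "Jupyter Notebook",
    "max_stars_repo_path": "OpenWeatherMap/OpenWeatherMap_Get_City_Weather.ipynb",
    "max_stars_repo_name": "techthiyanes/awesome-notebooks",
    "avg_line_length": 25.060932,
    "max_line_length": 300,
    "alphanum_fraction": 0.580235,
}])

# Illustrative quality filter over the numeric columns.
mask = (df["alphanum_fraction"] > 0.5) & (df["max_line_length"] <= 1000)
print(df.loc[mask, ["max_stars_repo_path", "size", "avg_line_length"]])
```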
4a79338f46f7b04145faaaf17829305eb82355b1
6,992
ipynb
Jupyter Notebook
OpenWeatherMap/OpenWeatherMap_Get_City_Weather.ipynb
techthiyanes/awesome-notebooks
10ab4da1b94dfa101e908356a649609b0b17561a
[ "BSD-3-Clause" ]
null
null
null
OpenWeatherMap/OpenWeatherMap_Get_City_Weather.ipynb
techthiyanes/awesome-notebooks
10ab4da1b94dfa101e908356a649609b0b17561a
[ "BSD-3-Clause" ]
null
null
null
OpenWeatherMap/OpenWeatherMap_Get_City_Weather.ipynb
techthiyanes/awesome-notebooks
10ab4da1b94dfa101e908356a649609b0b17561a
[ "BSD-3-Clause" ]
null
null
null
25.060932
300
0.580235
[ [ [ "<img width=\"10%\" alt=\"Naas\" src=\"https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160\"/>", "_____no_output_____" ], [ "# OpenWeatherMap - Get City Weather\n<a href=\"https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/OpenWeatherMap/OpenWeatherMap_Get_City_Weather.ipynb\" target=\"_parent\"><img src=\"https://naasai-public.s3.eu-west-3.amazonaws.com/open_in_naas.svg\"/></a>", "_____no_output_____" ], [ "**Tags:** #openweathermap #opendata #snippet #dataframe", "_____no_output_____" ], [ "**Author:** [Christophe Blefari](https://www.linkedin.com/in/christopheblefari/)", "_____no_output_____" ], [ "## Input", "_____no_output_____" ], [ "### Import library", "_____no_output_____" ] ], [ [ "import requests", "_____no_output_____" ] ], [ [ "### Variables", "_____no_output_____" ] ], [ [ "OPENWEATHER_KEY = '**********' # get your key from here https://home.openweathermap.org/api_keys (it takes couples of minutes)\nCITY = \"Paris\"", "_____no_output_____" ] ], [ [ "## Model", "_____no_output_____" ], [ "### Fonctions", "_____no_output_____" ] ], [ [ "def get_weather_info(city):\n url = f\"http://api.openweathermap.org/data/2.5/weather?q={city}&appid={OPENWEATHER_KEY}\"\n response = requests.get(url)\n return response.json()\n\ndef format_weather_data(data):\n return {\n \"temp\": f'{round(int(data[\"main\"][\"temp\"]) - 273.15, 1)}°',\n \"city\": data[\"name\"],\n }\n \ndef run(city):\n data = get_weather_info(city)\n return format_weather_data(data)", "_____no_output_____" ] ], [ [ "## Output", "_____no_output_____" ], [ "### Display result", "_____no_output_____" ] ], [ [ "run(CITY)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
4a793a463d70825836dc80a1cb160d04723be7b3
8,287
ipynb
Jupyter Notebook
.github/controls/check_notebooks.ipynb
srini047/awesome-notebooks
2a5b771b37b62090de5311d61dce8495fae7b59f
[ "BSD-3-Clause" ]
1
2022-03-24T07:46:45.000Z
2022-03-24T07:46:45.000Z
.github/controls/check_notebooks.ipynb
srini047/awesome-notebooks
2a5b771b37b62090de5311d61dce8495fae7b59f
[ "BSD-3-Clause" ]
null
null
null
.github/controls/check_notebooks.ipynb
srini047/awesome-notebooks
2a5b771b37b62090de5311d61dce8495fae7b59f
[ "BSD-3-Clause" ]
null
null
null
29.179577
168
0.509835
[ [ [ "<img width=\"10%\" alt=\"Naas\" src=\"https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160\"/>", "_____no_output_____" ], [ "# CI/CD - Make sure all notebooks respects our format policy", "_____no_output_____" ], [ "**Tags:** #naas", "_____no_output_____" ], [ "**Author:** [Maxime Jublou](https://www.linkedin.com/in/maximejublou/)", "_____no_output_____" ], [ "# Input", "_____no_output_____" ], [ "### Import libraries", "_____no_output_____" ] ], [ [ "import json\nimport glob\nfrom rich import print\nimport pydash\nimport re", "_____no_output_____" ] ], [ [ "## Model", "_____no_output_____" ], [ "### Utility functions\n\nThese functions are used by other to not repeat ourselves.", "_____no_output_____" ] ], [ [ "def tag_exists(tagname, cells):\n for cell in cells:\n if tagname in pydash.get(cell, 'metadata.tags', []):\n return True\n return False\n\ndef regexp_match(regex, string):\n matches = re.finditer(regex, string, re.MULTILINE)\n return len(list(matches)) >= 1\n\ndef check_regexp(cells, regex, source):\n cell_str = pydash.get(cells, source, '')\n return regexp_match(regex, cell_str)\n\ndef check_title_exists(cells, title):\n for cell in cells:\n if pydash.get(cell, 'cell_type') == 'markdown' and regexp_match(rf\"^## *{title}\", pydash.get(cell, 'source[0]')):\n return True\n return False", "_____no_output_____" ] ], [ [ "### Check functions\n\nThis functions are used to check if a notebook contains the rights cells with proper formatting.", "_____no_output_____" ] ], [ [ "def check_naas_logo(cells):\n logo_content = '<img width=\"10%\" alt=\"Naas\" src=\"https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160\"/>'\n if pydash.get(cells, '[0].cell_type') == 'markdown' and pydash.get(cells, '[0].source[0]', '').startswith(logo_content):\n return (True, '')\n return (False, f'''\n Requirements:\n - Cell number: 1\n - Cell type: Markdown\n - Shape: {logo_content}\n ''')\n\ndef check_title_match_regexp(cells):\n return (check_regexp(cells, r\"markdown\", '[1].cell_type') and check_regexp(cells, r\"^#.*-.*\", '[1].source[0]'), '''\n Requirements:\n - Cell number: 2\n - Cell type: Markdown\n - Shape: \"# something - some other thing\"\n ''')\n\ndef check_tool_tags(cells):\n return (check_regexp(cells, r\"markdown\", '[2].cell_type') and check_regexp(cells, r\"^\\*\\*Tags:\\*\\* (#[1-9,a-z,A-Z]*( *|$))*\", '[2].source[0]'), '''\n Requirements:\n - Cell number: 3\n - Cell type: Markdown\n - Shape: \"**Tags:** #atLeastOneTool\"\n ''')\n\ndef check_author(cells):\n return (check_regexp(cells, r\"markdown\", '[3].cell_type') and check_regexp(cells, r\"^\\*\\*Author:\\*\\* *.*\", '[3].source[0]'), '''\n Requirements:\n - Cell number: 4\n - Cell type: Markdown\n - Shape: \"**Author:** At least one author name\"\n ''')\n\ndef check_input_title_exists(cells):\n return (check_title_exists(cells, 'Input'), '''\n Requirements:\n - Cell number: Any\n - Cell type: Markdown\n - Shape: \"## Input\"\n ''')\n\ndef check_model_title_exists(cells):\n return (check_title_exists(cells, 'Model'), '''\n Requirements:\n - Cell number: Any\n - Cell type: Markdown\n - Shape: \"## Model\"\n ''')\n\ndef check_output_title_exists(cells):\n return (check_title_exists(cells, 'Output'), '''\n Requirements:\n - Cell number: Any\n - Cell type: Markdown\n - Shape: \"## Output\"\n ''')", "_____no_output_____" ] ], [ [ "## Output", "_____no_output_____" ] ], [ [ "got_errors = False\nerror_counter = 0\nfor file in glob.glob('../../**/*.ipynb', recursive=True):\n # Do not check notebooks in .github or at 
the root of the project.\n if '.github' in file or len(file.split('/')) == 3:\n continue\n\n notebook = json.load(open(file))\n cells = notebook.get('cells')\n \n filename = \"[dark_orange]\" + file.replace(\"../../\", \"\") + \"[/dark_orange]\"\n outputs = [f'Errors found in: {filename}']\n should_display_debug = False\n \n for checkf in [\n check_naas_logo,\n check_title_match_regexp,\n check_tool_tags,\n check_author,\n check_input_title_exists,\n check_model_title_exists,\n check_output_title_exists]: \n \n result, msg = checkf(cells)\n if result is False:\n should_display_debug = True\n status_msg = \"[bright_green]OK[/bright_green]\" if result is True else f\"[bright_red]KO {msg}[/bright_red]\"\n outputs.append(f'{checkf.__name__} ... {status_msg}')\n \n if should_display_debug:\n got_errors = True\n error_counter += 1\n for msg in outputs:\n print(msg)\n print(\"\\n\")\n\nif got_errors == True:\n print(f'[bright_red]You have {error_counter} notebooks having errors!')\n exit(1)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a79510575f9dd61dc2ca13e112a6703e78234cb
42,625
ipynb
Jupyter Notebook
d2l-en/mxnet/chapter_linear-networks/softmax-regression-concise.ipynb
gr8khan/d2lai
7c10432f38c80e86978cd075d0024902b47842a0
[ "MIT" ]
null
null
null
d2l-en/mxnet/chapter_linear-networks/softmax-regression-concise.ipynb
gr8khan/d2lai
7c10432f38c80e86978cd075d0024902b47842a0
[ "MIT" ]
null
null
null
d2l-en/mxnet/chapter_linear-networks/softmax-regression-concise.ipynb
gr8khan/d2lai
7c10432f38c80e86978cd075d0024902b47842a0
[ "MIT" ]
null
null
null
40.441176
362
0.508504
[ [ [ "# Concise Implementation of Softmax Regression\n:label:`sec_softmax_concise`\n\nJust as high-level APIs of deep learning frameworks\nmade it much easier\nto implement linear regression in :numref:`sec_linear_concise`,\nwe will find it similarly (or possibly more)\nconvenient for implementing classification models. Let us stick with the Fashion-MNIST dataset\nand keep the batch size at 256 as in :numref:`sec_softmax_scratch`.\n", "_____no_output_____" ] ], [ [ "from d2l import mxnet as d2l\nfrom mxnet import gluon, init, npx\nfrom mxnet.gluon import nn\nnpx.set_np()", "_____no_output_____" ], [ "batch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)", "_____no_output_____" ] ], [ [ "## Initializing Model Parameters\n\nAs mentioned in :numref:`sec_softmax`,\nthe output layer of softmax regression\nis a fully-connected layer.\nTherefore, to implement our model,\nwe just need to add one fully-connected layer\nwith 10 outputs to our `Sequential`.\nAgain, here, the `Sequential` is not really necessary,\nbut we might as well form the habit since it will be ubiquitous\nwhen implementing deep models.\nAgain, we initialize the weights at random\nwith zero mean and standard deviation 0.01.\n", "_____no_output_____" ] ], [ [ "net = nn.Sequential()\nnet.add(nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))", "_____no_output_____" ] ], [ [ "## Softmax Implementation Revisited\n:label:`subsec_softmax-implementation-revisited`\n\nIn the previous example of :numref:`sec_softmax_scratch`,\nwe calculated our model's output\nand then ran this output through the cross-entropy loss.\nMathematically, that is a perfectly reasonable thing to do.\nHowever, from a computational perspective,\nexponentiation can be a source of numerical stability issues.\n\nRecall that the softmax function calculates\n$\\hat y_j = \\frac{\\exp(o_j)}{\\sum_k \\exp(o_k)}$,\nwhere $\\hat y_j$ is the $j^\\mathrm{th}$ element of\nthe predicted probability distribution $\\hat{\\mathbf{y}}$\nand $o_j$ is the $j^\\mathrm{th}$ element of the logits\n$\\mathbf{o}$.\nIf some of the $o_k$ are very large (i.e., very positive),\nthen $\\exp(o_k)$ might be larger than the largest number\nwe can have for certain data types (i.e., *overflow*).\nThis would make the denominator (and/or numerator) `inf` (infinity)\nand we wind up encountering either 0, `inf`, or `nan` (not a number) for $\\hat y_j$.\nIn these situations we do not get a well-defined\nreturn value for cross-entropy.\n\n\nOne trick to get around this is to first subtract $\\max(o_k)$\nfrom all $o_k$ before proceeding with the softmax calculation.\nYou can verify that this shifting of each $o_k$ by constant factor\ndoes not change the return value of softmax.\nAfter the subtraction and normalization step,\nit might be possible that some $o_j$ have large negative values\nand thus that the corresponding $\\exp(o_j)$ will take values close to zero.\nThese might be rounded to zero due to finite precision (i.e., *underflow*),\nmaking $\\hat y_j$ zero and giving us `-inf` for $\\log(\\hat y_j)$.\nA few steps down the road in backpropagation,\nwe might find ourselves faced with a screenful\nof the dreaded `nan` results.\n\nFortunately, we are saved by the fact that\neven though we are computing exponential functions,\nwe ultimately intend to take their log\n(when calculating the cross-entropy loss).\nBy combining these two operators\nsoftmax and cross-entropy together,\nwe can escape the numerical stability issues\nthat might otherwise plague us during 
backpropagation.\nAs shown in the equation below, we avoid calculating $\\exp(o_j)$\nand can use instead $o_j$ directly due to the canceling in $\\log(\\exp(\\cdot))$.\n\n$$\n\\begin{aligned}\n\\log{(\\hat y_j)} & = \\log\\left( \\frac{\\exp(o_j)}{\\sum_k \\exp(o_k)}\\right) \\\\\n& = \\log{(\\exp(o_j))}-\\log{\\left( \\sum_k \\exp(o_k) \\right)} \\\\\n& = o_j -\\log{\\left( \\sum_k \\exp(o_k) \\right)}.\n\\end{aligned}\n$$\n\nWe will want to keep the conventional softmax function handy\nin case we ever want to evaluate the output probabilities by our model.\nBut instead of passing softmax probabilities into our new loss function,\nwe will just pass the logits and compute the softmax and its log\nall at once inside the cross-entropy loss function,\nwhich does smart things like the [\"LogSumExp trick\"](https://en.wikipedia.org/wiki/LogSumExp).\n", "_____no_output_____" ] ], [ [ "loss = gluon.loss.SoftmaxCrossEntropyLoss()", "_____no_output_____" ] ], [ [ "## Optimization Algorithm\n\nHere, we use minibatch stochastic gradient descent\nwith a learning rate of 0.1 as the optimization algorithm.\nNote that this is the same as we applied in the linear regression example\nand it illustrates the general applicability of the optimizers.\n", "_____no_output_____" ] ], [ [ "trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})", "_____no_output_____" ] ], [ [ "## Training\n\nNext we call the training function defined in :numref:`sec_softmax_scratch` to train the model.\n", "_____no_output_____" ] ], [ [ "num_epochs = 10\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "_____no_output_____" ] ], [ [ "As before, this algorithm converges to a solution\nthat achieves a decent accuracy,\nalbeit this time with fewer lines of code than before.\n\n\n## Summary\n\n* Using high-level APIs, we can implement softmax regression much more concisely.\n* From a computational perspective, implementing softmax regression has intricacies. Note that in many cases, a deep learning framework takes additional precautions beyond these most well-known tricks to ensure numerical stability, saving us from even more pitfalls that we would encounter if we tried to code all of our models from scratch in practice.\n\n## Exercises\n\n1. Try adjusting the hyperparameters, such as the batch size, number of epochs, and learning rate, to see what the results are.\n1. Increase the numper of epochs for training. Why might the test accuracy decrease after a while? How could we fix this?\n", "_____no_output_____" ], [ "[Discussions](https://discuss.d2l.ai/t/52)\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
4a795f085c411e25f816006c096f4a26aadd3ed8
130,249
ipynb
Jupyter Notebook
Parse/BeautifulSoup 4/SoupStrainer.ipynb
Novartus/ScrapBook
635e818e226f09883913ecb63283a7262dde5333
[ "MIT" ]
null
null
null
Parse/BeautifulSoup 4/SoupStrainer.ipynb
Novartus/ScrapBook
635e818e226f09883913ecb63283a7262dde5333
[ "MIT" ]
null
null
null
Parse/BeautifulSoup 4/SoupStrainer.ipynb
Novartus/ScrapBook
635e818e226f09883913ecb63283a7262dde5333
[ "MIT" ]
null
null
null
42.537231
259
0.407796
[ [ [ "import re, requests\nfrom bs4 import BeautifulSoup, SoupStrainer", "_____no_output_____" ], [ "url=\"http://books.toscrape.com/index.html\"\nresp = requests.get(url)\nresp", "_____no_output_____" ], [ "soup = BeautifulSoup(resp.text, \"lxml\")\nprint(soup.prettify())", "<!DOCTYPE html>\n<!--[if lt IE 7]> <html lang=\"en-us\" class=\"no-js lt-ie9 lt-ie8 lt-ie7\"> <![endif]-->\n<!--[if IE 7]> <html lang=\"en-us\" class=\"no-js lt-ie9 lt-ie8\"> <![endif]-->\n<!--[if IE 8]> <html lang=\"en-us\" class=\"no-js lt-ie9\"> <![endif]-->\n<!--[if gt IE 8]><!-->\n<html class=\"no-js\" lang=\"en-us\">\n <!--<![endif]-->\n <head>\n <title>\n All products | Books to Scrape - Sandbox\n </title>\n <meta content=\"text/html; charset=utf-8\" http-equiv=\"content-type\"/>\n <meta content=\"24th Jun 2016 09:29\" name=\"created\"/>\n <meta content=\"\" name=\"description\"/>\n <meta content=\"width=device-width\" name=\"viewport\"/>\n <meta content=\"NOARCHIVE,NOCACHE\" name=\"robots\"/>\n <!-- Le HTML5 shim, for IE6-8 support of HTML elements -->\n <!--[if lt IE 9]>\n <script src=\"//html5shim.googlecode.com/svn/trunk/html5.js\"></script>\n <![endif]-->\n <link href=\"static/oscar/favicon.ico\" rel=\"shortcut icon\"/>\n <link href=\"static/oscar/css/styles.css\" rel=\"stylesheet\" type=\"text/css\"/>\n <link href=\"static/oscar/js/bootstrap-datetimepicker/bootstrap-datetimepicker.css\" rel=\"stylesheet\"/>\n <link href=\"static/oscar/css/datetimepicker.css\" rel=\"stylesheet\" type=\"text/css\"/>\n </head>\n <body class=\"default\" id=\"default\">\n <header class=\"header container-fluid\">\n <div class=\"page_inner\">\n <div class=\"row\">\n <div class=\"col-sm-8 h1\">\n <a href=\"index.html\">\n Books to Scrape\n </a>\n <small>\n We love being scraped!\n </small>\n </div>\n </div>\n </div>\n </header>\n <div class=\"container-fluid page\">\n <div class=\"page_inner\">\n <ul class=\"breadcrumb\">\n <li>\n <a href=\"index.html\">\n Home\n </a>\n </li>\n <li class=\"active\">\n All products\n </li>\n </ul>\n <div class=\"row\">\n <aside class=\"sidebar col-sm-4 col-md-3\">\n <div id=\"promotions_left\">\n </div>\n <div class=\"side_categories\">\n <ul class=\"nav nav-list\">\n <li>\n <a href=\"catalogue/category/books_1/index.html\">\n Books\n </a>\n <ul>\n <li>\n <a href=\"catalogue/category/books/travel_2/index.html\">\n Travel\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/mystery_3/index.html\">\n Mystery\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/historical-fiction_4/index.html\">\n Historical Fiction\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/sequential-art_5/index.html\">\n Sequential Art\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/classics_6/index.html\">\n Classics\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/philosophy_7/index.html\">\n Philosophy\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/romance_8/index.html\">\n Romance\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/womens-fiction_9/index.html\">\n Womens Fiction\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/fiction_10/index.html\">\n Fiction\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/childrens_11/index.html\">\n Childrens\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/religion_12/index.html\">\n Religion\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/nonfiction_13/index.html\">\n Nonfiction\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/music_14/index.html\">\n Music\n </a>\n 
</li>\n <li>\n <a href=\"catalogue/category/books/default_15/index.html\">\n Default\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/science-fiction_16/index.html\">\n Science Fiction\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/sports-and-games_17/index.html\">\n Sports and Games\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/add-a-comment_18/index.html\">\n Add a comment\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/fantasy_19/index.html\">\n Fantasy\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/new-adult_20/index.html\">\n New Adult\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/young-adult_21/index.html\">\n Young Adult\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/science_22/index.html\">\n Science\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/poetry_23/index.html\">\n Poetry\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/paranormal_24/index.html\">\n Paranormal\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/art_25/index.html\">\n Art\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/psychology_26/index.html\">\n Psychology\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/autobiography_27/index.html\">\n Autobiography\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/parenting_28/index.html\">\n Parenting\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/adult-fiction_29/index.html\">\n Adult Fiction\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/humor_30/index.html\">\n Humor\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/horror_31/index.html\">\n Horror\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/history_32/index.html\">\n History\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/food-and-drink_33/index.html\">\n Food and Drink\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/christian-fiction_34/index.html\">\n Christian Fiction\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/business_35/index.html\">\n Business\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/biography_36/index.html\">\n Biography\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/thriller_37/index.html\">\n Thriller\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/contemporary_38/index.html\">\n Contemporary\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/spirituality_39/index.html\">\n Spirituality\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/academic_40/index.html\">\n Academic\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/self-help_41/index.html\">\n Self Help\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/historical_42/index.html\">\n Historical\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/christian_43/index.html\">\n Christian\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/suspense_44/index.html\">\n Suspense\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/short-stories_45/index.html\">\n Short Stories\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/novels_46/index.html\">\n Novels\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/health_47/index.html\">\n Health\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/politics_48/index.html\">\n Politics\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/cultural_49/index.html\">\n Cultural\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/erotica_50/index.html\">\n Erotica\n 
</a>\n </li>\n <li>\n <a href=\"catalogue/category/books/crime_51/index.html\">\n Crime\n </a>\n </li>\n </ul>\n </li>\n </ul>\n </div>\n </aside>\n <div class=\"col-sm-8 col-md-9\">\n <div class=\"page-header action\">\n <h1>\n All products\n </h1>\n </div>\n <div id=\"messages\">\n </div>\n <div id=\"promotions\">\n </div>\n <form class=\"form-horizontal\" method=\"get\">\n <div style=\"display:none\">\n </div>\n <strong>\n 1000\n </strong>\n results - showing\n <strong>\n 1\n </strong>\n to\n <strong>\n 20\n </strong>\n .\n </form>\n <section>\n <div class=\"alert alert-warning\" role=\"alert\">\n <strong>\n Warning!\n </strong>\n This is a demo website for web scraping purposes. Prices and ratings here were randomly assigned and have no real meaning.\n </div>\n <div>\n <ol class=\"row\">\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/a-light-in-the-attic_1000/index.html\">\n <img alt=\"A Light in the Attic\" class=\"thumbnail\" src=\"media/cache/2c/da/2cdad67c44b002e7ead0cc35693c0e8b.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Three\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/a-light-in-the-attic_1000/index.html\" title=\"A Light in the Attic\">\n A Light in the ...\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £51.77\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/tipping-the-velvet_999/index.html\">\n <img alt=\"Tipping the Velvet\" class=\"thumbnail\" src=\"media/cache/26/0c/260c6ae16bce31c8f8c95daddd9f4a1c.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating One\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/tipping-the-velvet_999/index.html\" title=\"Tipping the Velvet\">\n Tipping the Velvet\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £53.74\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/soumission_998/index.html\">\n <img alt=\"Soumission\" class=\"thumbnail\" src=\"media/cache/3e/ef/3eef99c9d9adef34639f510662022830.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating One\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/soumission_998/index.html\" title=\"Soumission\">\n Soumission\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £50.10\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n 
</p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/sharp-objects_997/index.html\">\n <img alt=\"Sharp Objects\" class=\"thumbnail\" src=\"media/cache/32/51/3251cf3a3412f53f339e42cac2134093.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Four\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/sharp-objects_997/index.html\" title=\"Sharp Objects\">\n Sharp Objects\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £47.82\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/sapiens-a-brief-history-of-humankind_996/index.html\">\n <img alt=\"Sapiens: A Brief History of Humankind\" class=\"thumbnail\" src=\"media/cache/be/a5/bea5697f2534a2f86a3ef27b5a8c12a6.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Five\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/sapiens-a-brief-history-of-humankind_996/index.html\" title=\"Sapiens: A Brief History of Humankind\">\n Sapiens: A Brief History ...\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £54.23\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/the-requiem-red_995/index.html\">\n <img alt=\"The Requiem Red\" class=\"thumbnail\" src=\"media/cache/68/33/68339b4c9bc034267e1da611ab3b34f8.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating One\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/the-requiem-red_995/index.html\" title=\"The Requiem Red\">\n The Requiem Red\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £22.65\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/the-dirty-little-secrets-of-getting-your-dream-job_994/index.html\">\n <img alt=\"The Dirty Little Secrets of Getting Your Dream Job\" class=\"thumbnail\" src=\"media/cache/92/27/92274a95b7c251fea59a2b8a78275ab4.jpg\"/>\n </a>\n 
</div>\n <p class=\"star-rating Four\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/the-dirty-little-secrets-of-getting-your-dream-job_994/index.html\" title=\"The Dirty Little Secrets of Getting Your Dream Job\">\n The Dirty Little Secrets ...\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £33.34\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/the-coming-woman-a-novel-based-on-the-life-of-the-infamous-feminist-victoria-woodhull_993/index.html\">\n <img alt=\"The Coming Woman: A Novel Based on the Life of the Infamous Feminist, Victoria Woodhull\" class=\"thumbnail\" src=\"media/cache/3d/54/3d54940e57e662c4dd1f3ff00c78cc64.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Three\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/the-coming-woman-a-novel-based-on-the-life-of-the-infamous-feminist-victoria-woodhull_993/index.html\" title=\"The Coming Woman: A Novel Based on the Life of the Infamous Feminist, Victoria Woodhull\">\n The Coming Woman: A ...\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £17.93\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/the-boys-in-the-boat-nine-americans-and-their-epic-quest-for-gold-at-the-1936-berlin-olympics_992/index.html\">\n <img alt=\"The Boys in the Boat: Nine Americans and Their Epic Quest for Gold at the 1936 Berlin Olympics\" class=\"thumbnail\" src=\"media/cache/66/88/66883b91f6804b2323c8369331cb7dd1.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Four\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/the-boys-in-the-boat-nine-americans-and-their-epic-quest-for-gold-at-the-1936-berlin-olympics_992/index.html\" title=\"The Boys in the Boat: Nine Americans and Their Epic Quest for Gold at the 1936 Berlin Olympics\">\n The Boys in the ...\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £22.60\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/the-black-maria_991/index.html\">\n <img alt=\"The Black Maria\" class=\"thumbnail\" 
src=\"media/cache/58/46/5846057e28022268153beff6d352b06c.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating One\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/the-black-maria_991/index.html\" title=\"The Black Maria\">\n The Black Maria\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £52.15\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/starving-hearts-triangular-trade-trilogy-1_990/index.html\">\n <img alt=\"Starving Hearts (Triangular Trade Trilogy, #1)\" class=\"thumbnail\" src=\"media/cache/be/f4/bef44da28c98f905a3ebec0b87be8530.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Two\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/starving-hearts-triangular-trade-trilogy-1_990/index.html\" title=\"Starving Hearts (Triangular Trade Trilogy, #1)\">\n Starving Hearts (Triangular Trade ...\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £13.99\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/shakespeares-sonnets_989/index.html\">\n <img alt=\"Shakespeare's Sonnets\" class=\"thumbnail\" src=\"media/cache/10/48/1048f63d3b5061cd2f424d20b3f9b666.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Four\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/shakespeares-sonnets_989/index.html\" title=\"Shakespeare's Sonnets\">\n Shakespeare's Sonnets\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £20.66\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/set-me-free_988/index.html\">\n <img alt=\"Set Me Free\" class=\"thumbnail\" src=\"media/cache/5b/88/5b88c52633f53cacf162c15f4f823153.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Five\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/set-me-free_988/index.html\" title=\"Set Me Free\">\n Set Me Free\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £17.46\n </p>\n <p 
class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/scott-pilgrims-precious-little-life-scott-pilgrim-1_987/index.html\">\n <img alt=\"Scott Pilgrim's Precious Little Life (Scott Pilgrim #1)\" class=\"thumbnail\" src=\"media/cache/94/b1/94b1b8b244bce9677c2f29ccc890d4d2.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Five\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/scott-pilgrims-precious-little-life-scott-pilgrim-1_987/index.html\" title=\"Scott Pilgrim's Precious Little Life (Scott Pilgrim #1)\">\n Scott Pilgrim's Precious Little ...\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £52.29\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/rip-it-up-and-start-again_986/index.html\">\n <img alt=\"Rip it Up and Start Again\" class=\"thumbnail\" src=\"media/cache/81/c4/81c4a973364e17d01f217e1188253d5e.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Five\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/rip-it-up-and-start-again_986/index.html\" title=\"Rip it Up and Start Again\">\n Rip it Up and ...\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £35.02\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/our-band-could-be-your-life-scenes-from-the-american-indie-underground-1981-1991_985/index.html\">\n <img alt=\"Our Band Could Be Your Life: Scenes from the American Indie Underground, 1981-1991\" class=\"thumbnail\" src=\"media/cache/54/60/54607fe8945897cdcced0044103b10b6.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Three\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/our-band-could-be-your-life-scenes-from-the-american-indie-underground-1981-1991_985/index.html\" title=\"Our Band Could Be Your Life: Scenes from the American Indie Underground, 1981-1991\">\n Our Band Could Be ...\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £57.25\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" 
data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/olio_984/index.html\">\n <img alt=\"Olio\" class=\"thumbnail\" src=\"media/cache/55/33/553310a7162dfbc2c6d19a84da0df9e1.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating One\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/olio_984/index.html\" title=\"Olio\">\n Olio\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £23.88\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/mesaerion-the-best-science-fiction-stories-1800-1849_983/index.html\">\n <img alt=\"Mesaerion: The Best Science Fiction Stories 1800-1849\" class=\"thumbnail\" src=\"media/cache/09/a3/09a3aef48557576e1a85ba7efea8ecb7.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating One\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/mesaerion-the-best-science-fiction-stories-1800-1849_983/index.html\" title=\"Mesaerion: The Best Science Fiction Stories 1800-1849\">\n Mesaerion: The Best Science ...\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £37.59\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/libertarianism-for-beginners_982/index.html\">\n <img alt=\"Libertarianism for Beginners\" class=\"thumbnail\" src=\"media/cache/0b/bc/0bbcd0a6f4bcd81ccb1049a52736406e.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Two\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/libertarianism-for-beginners_982/index.html\" title=\"Libertarianism for Beginners\">\n Libertarianism for Beginners\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £51.33\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/its-only-the-himalayas_981/index.html\">\n <img alt=\"It's Only the Himalayas\" class=\"thumbnail\" src=\"media/cache/27/a5/27a53d0bb95bdd88288eaf66c9230d7e.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating 
Two\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/its-only-the-himalayas_981/index.html\" title=\"It's Only the Himalayas\">\n It's Only the Himalayas\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £45.17\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n </ol>\n <div>\n <ul class=\"pager\">\n <li class=\"current\">\n Page 1 of 50\n </li>\n <li class=\"next\">\n <a href=\"catalogue/page-2.html\">\n next\n </a>\n </li>\n </ul>\n </div>\n </div>\n </section>\n </div>\n </div>\n <!-- /row -->\n </div>\n <!-- /page_inner -->\n </div>\n <!-- /container-fluid -->\n <footer class=\"footer container-fluid\">\n </footer>\n <!-- jQuery -->\n <script src=\"http://ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js\">\n </script>\n <script>\n window.jQuery || document.write('<script src=\"static/oscar/js/jquery/jquery-1.9.1.min.js\"><\\/script>')\n </script>\n <!-- Twitter Bootstrap -->\n <script src=\"static/oscar/js/bootstrap3/bootstrap.min.js\" type=\"text/javascript\">\n </script>\n <!-- Oscar -->\n <script charset=\"utf-8\" src=\"static/oscar/js/oscar/ui.js\" type=\"text/javascript\">\n </script>\n <script charset=\"utf-8\" src=\"static/oscar/js/bootstrap-datetimepicker/bootstrap-datetimepicker.js\" type=\"text/javascript\">\n </script>\n <script charset=\"utf-8\" src=\"static/oscar/js/bootstrap-datetimepicker/locales/bootstrap-datetimepicker.all.js\" type=\"text/javascript\">\n </script>\n <script type=\"text/javascript\">\n $(function() {\n \n \n \n oscar.init();\n\n oscar.search.init();\n\n });\n </script>\n <!-- Version: N/A -->\n </body>\n</html>\n\n" ], [ "div_tags = SoupStrainer(\"div\") #Getting Only Div Tags", "_____no_output_____" ], [ "soup = BeautifulSoup(resp.text,\"lxml\",parse_only = div_tags)\nprint(soup.prettify())", "<!DOCTYPE html>\n<div class=\"page_inner\">\n <div class=\"row\">\n <div class=\"col-sm-8 h1\">\n <a href=\"index.html\">\n Books to Scrape\n </a>\n <small>\n We love being scraped!\n </small>\n </div>\n </div>\n</div>\n<div class=\"container-fluid page\">\n <div class=\"page_inner\">\n <ul class=\"breadcrumb\">\n <li>\n <a href=\"index.html\">\n Home\n </a>\n </li>\n <li class=\"active\">\n All products\n </li>\n </ul>\n <div class=\"row\">\n <aside class=\"sidebar col-sm-4 col-md-3\">\n <div id=\"promotions_left\">\n </div>\n <div class=\"side_categories\">\n <ul class=\"nav nav-list\">\n <li>\n <a href=\"catalogue/category/books_1/index.html\">\n Books\n </a>\n <ul>\n <li>\n <a href=\"catalogue/category/books/travel_2/index.html\">\n Travel\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/mystery_3/index.html\">\n Mystery\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/historical-fiction_4/index.html\">\n Historical Fiction\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/sequential-art_5/index.html\">\n Sequential Art\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/classics_6/index.html\">\n Classics\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/philosophy_7/index.html\">\n Philosophy\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/romance_8/index.html\">\n Romance\n </a>\n </li>\n <li>\n <a 
href=\"catalogue/category/books/womens-fiction_9/index.html\">\n Womens Fiction\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/fiction_10/index.html\">\n Fiction\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/childrens_11/index.html\">\n Childrens\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/religion_12/index.html\">\n Religion\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/nonfiction_13/index.html\">\n Nonfiction\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/music_14/index.html\">\n Music\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/default_15/index.html\">\n Default\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/science-fiction_16/index.html\">\n Science Fiction\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/sports-and-games_17/index.html\">\n Sports and Games\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/add-a-comment_18/index.html\">\n Add a comment\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/fantasy_19/index.html\">\n Fantasy\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/new-adult_20/index.html\">\n New Adult\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/young-adult_21/index.html\">\n Young Adult\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/science_22/index.html\">\n Science\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/poetry_23/index.html\">\n Poetry\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/paranormal_24/index.html\">\n Paranormal\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/art_25/index.html\">\n Art\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/psychology_26/index.html\">\n Psychology\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/autobiography_27/index.html\">\n Autobiography\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/parenting_28/index.html\">\n Parenting\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/adult-fiction_29/index.html\">\n Adult Fiction\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/humor_30/index.html\">\n Humor\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/horror_31/index.html\">\n Horror\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/history_32/index.html\">\n History\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/food-and-drink_33/index.html\">\n Food and Drink\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/christian-fiction_34/index.html\">\n Christian Fiction\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/business_35/index.html\">\n Business\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/biography_36/index.html\">\n Biography\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/thriller_37/index.html\">\n Thriller\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/contemporary_38/index.html\">\n Contemporary\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/spirituality_39/index.html\">\n Spirituality\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/academic_40/index.html\">\n Academic\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/self-help_41/index.html\">\n Self Help\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/historical_42/index.html\">\n Historical\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/christian_43/index.html\">\n Christian\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/suspense_44/index.html\">\n Suspense\n </a>\n 
</li>\n <li>\n <a href=\"catalogue/category/books/short-stories_45/index.html\">\n Short Stories\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/novels_46/index.html\">\n Novels\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/health_47/index.html\">\n Health\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/politics_48/index.html\">\n Politics\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/cultural_49/index.html\">\n Cultural\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/erotica_50/index.html\">\n Erotica\n </a>\n </li>\n <li>\n <a href=\"catalogue/category/books/crime_51/index.html\">\n Crime\n </a>\n </li>\n </ul>\n </li>\n </ul>\n </div>\n </aside>\n <div class=\"col-sm-8 col-md-9\">\n <div class=\"page-header action\">\n <h1>\n All products\n </h1>\n </div>\n <div id=\"messages\">\n </div>\n <div id=\"promotions\">\n </div>\n <form class=\"form-horizontal\" method=\"get\">\n <div style=\"display:none\">\n </div>\n <strong>\n 1000\n </strong>\n results - showing\n <strong>\n 1\n </strong>\n to\n <strong>\n 20\n </strong>\n .\n </form>\n <section>\n <div class=\"alert alert-warning\" role=\"alert\">\n <strong>\n Warning!\n </strong>\n This is a demo website for web scraping purposes. Prices and ratings here were randomly assigned and have no real meaning.\n </div>\n <div>\n <ol class=\"row\">\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/a-light-in-the-attic_1000/index.html\">\n <img alt=\"A Light in the Attic\" class=\"thumbnail\" src=\"media/cache/2c/da/2cdad67c44b002e7ead0cc35693c0e8b.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Three\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/a-light-in-the-attic_1000/index.html\" title=\"A Light in the Attic\">\n A Light in the ...\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £51.77\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/tipping-the-velvet_999/index.html\">\n <img alt=\"Tipping the Velvet\" class=\"thumbnail\" src=\"media/cache/26/0c/260c6ae16bce31c8f8c95daddd9f4a1c.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating One\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/tipping-the-velvet_999/index.html\" title=\"Tipping the Velvet\">\n Tipping the Velvet\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £53.74\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/soumission_998/index.html\">\n <img alt=\"Soumission\" 
class=\"thumbnail\" src=\"media/cache/3e/ef/3eef99c9d9adef34639f510662022830.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating One\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/soumission_998/index.html\" title=\"Soumission\">\n Soumission\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £50.10\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/sharp-objects_997/index.html\">\n <img alt=\"Sharp Objects\" class=\"thumbnail\" src=\"media/cache/32/51/3251cf3a3412f53f339e42cac2134093.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Four\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/sharp-objects_997/index.html\" title=\"Sharp Objects\">\n Sharp Objects\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £47.82\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/sapiens-a-brief-history-of-humankind_996/index.html\">\n <img alt=\"Sapiens: A Brief History of Humankind\" class=\"thumbnail\" src=\"media/cache/be/a5/bea5697f2534a2f86a3ef27b5a8c12a6.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Five\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/sapiens-a-brief-history-of-humankind_996/index.html\" title=\"Sapiens: A Brief History of Humankind\">\n Sapiens: A Brief History ...\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £54.23\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/the-requiem-red_995/index.html\">\n <img alt=\"The Requiem Red\" class=\"thumbnail\" src=\"media/cache/68/33/68339b4c9bc034267e1da611ab3b34f8.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating One\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/the-requiem-red_995/index.html\" title=\"The Requiem Red\">\n The Requiem Red\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £22.65\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n 
</i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/the-dirty-little-secrets-of-getting-your-dream-job_994/index.html\">\n <img alt=\"The Dirty Little Secrets of Getting Your Dream Job\" class=\"thumbnail\" src=\"media/cache/92/27/92274a95b7c251fea59a2b8a78275ab4.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Four\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/the-dirty-little-secrets-of-getting-your-dream-job_994/index.html\" title=\"The Dirty Little Secrets of Getting Your Dream Job\">\n The Dirty Little Secrets ...\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £33.34\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/the-coming-woman-a-novel-based-on-the-life-of-the-infamous-feminist-victoria-woodhull_993/index.html\">\n <img alt=\"The Coming Woman: A Novel Based on the Life of the Infamous Feminist, Victoria Woodhull\" class=\"thumbnail\" src=\"media/cache/3d/54/3d54940e57e662c4dd1f3ff00c78cc64.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Three\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/the-coming-woman-a-novel-based-on-the-life-of-the-infamous-feminist-victoria-woodhull_993/index.html\" title=\"The Coming Woman: A Novel Based on the Life of the Infamous Feminist, Victoria Woodhull\">\n The Coming Woman: A ...\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £17.93\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/the-boys-in-the-boat-nine-americans-and-their-epic-quest-for-gold-at-the-1936-berlin-olympics_992/index.html\">\n <img alt=\"The Boys in the Boat: Nine Americans and Their Epic Quest for Gold at the 1936 Berlin Olympics\" class=\"thumbnail\" src=\"media/cache/66/88/66883b91f6804b2323c8369331cb7dd1.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Four\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/the-boys-in-the-boat-nine-americans-and-their-epic-quest-for-gold-at-the-1936-berlin-olympics_992/index.html\" title=\"The Boys in the Boat: Nine Americans and Their Epic Quest for Gold at the 1936 Berlin Olympics\">\n The Boys in the ...\n </a>\n </h3>\n <div 
class=\"product_price\">\n <p class=\"price_color\">\n £22.60\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/the-black-maria_991/index.html\">\n <img alt=\"The Black Maria\" class=\"thumbnail\" src=\"media/cache/58/46/5846057e28022268153beff6d352b06c.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating One\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/the-black-maria_991/index.html\" title=\"The Black Maria\">\n The Black Maria\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £52.15\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/starving-hearts-triangular-trade-trilogy-1_990/index.html\">\n <img alt=\"Starving Hearts (Triangular Trade Trilogy, #1)\" class=\"thumbnail\" src=\"media/cache/be/f4/bef44da28c98f905a3ebec0b87be8530.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Two\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/starving-hearts-triangular-trade-trilogy-1_990/index.html\" title=\"Starving Hearts (Triangular Trade Trilogy, #1)\">\n Starving Hearts (Triangular Trade ...\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £13.99\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/shakespeares-sonnets_989/index.html\">\n <img alt=\"Shakespeare's Sonnets\" class=\"thumbnail\" src=\"media/cache/10/48/1048f63d3b5061cd2f424d20b3f9b666.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Four\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/shakespeares-sonnets_989/index.html\" title=\"Shakespeare's Sonnets\">\n Shakespeare's Sonnets\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £20.66\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a 
href=\"catalogue/set-me-free_988/index.html\">\n <img alt=\"Set Me Free\" class=\"thumbnail\" src=\"media/cache/5b/88/5b88c52633f53cacf162c15f4f823153.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Five\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/set-me-free_988/index.html\" title=\"Set Me Free\">\n Set Me Free\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £17.46\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/scott-pilgrims-precious-little-life-scott-pilgrim-1_987/index.html\">\n <img alt=\"Scott Pilgrim's Precious Little Life (Scott Pilgrim #1)\" class=\"thumbnail\" src=\"media/cache/94/b1/94b1b8b244bce9677c2f29ccc890d4d2.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Five\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/scott-pilgrims-precious-little-life-scott-pilgrim-1_987/index.html\" title=\"Scott Pilgrim's Precious Little Life (Scott Pilgrim #1)\">\n Scott Pilgrim's Precious Little ...\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £52.29\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/rip-it-up-and-start-again_986/index.html\">\n <img alt=\"Rip it Up and Start Again\" class=\"thumbnail\" src=\"media/cache/81/c4/81c4a973364e17d01f217e1188253d5e.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Five\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/rip-it-up-and-start-again_986/index.html\" title=\"Rip it Up and Start Again\">\n Rip it Up and ...\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £35.02\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/our-band-could-be-your-life-scenes-from-the-american-indie-underground-1981-1991_985/index.html\">\n <img alt=\"Our Band Could Be Your Life: Scenes from the American Indie Underground, 1981-1991\" class=\"thumbnail\" src=\"media/cache/54/60/54607fe8945897cdcced0044103b10b6.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Three\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n 
<i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/our-band-could-be-your-life-scenes-from-the-american-indie-underground-1981-1991_985/index.html\" title=\"Our Band Could Be Your Life: Scenes from the American Indie Underground, 1981-1991\">\n Our Band Could Be ...\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £57.25\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/olio_984/index.html\">\n <img alt=\"Olio\" class=\"thumbnail\" src=\"media/cache/55/33/553310a7162dfbc2c6d19a84da0df9e1.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating One\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/olio_984/index.html\" title=\"Olio\">\n Olio\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £23.88\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/mesaerion-the-best-science-fiction-stories-1800-1849_983/index.html\">\n <img alt=\"Mesaerion: The Best Science Fiction Stories 1800-1849\" class=\"thumbnail\" src=\"media/cache/09/a3/09a3aef48557576e1a85ba7efea8ecb7.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating One\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/mesaerion-the-best-science-fiction-stories-1800-1849_983/index.html\" title=\"Mesaerion: The Best Science Fiction Stories 1800-1849\">\n Mesaerion: The Best Science ...\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £37.59\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/libertarianism-for-beginners_982/index.html\">\n <img alt=\"Libertarianism for Beginners\" class=\"thumbnail\" src=\"media/cache/0b/bc/0bbcd0a6f4bcd81ccb1049a52736406e.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Two\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/libertarianism-for-beginners_982/index.html\" title=\"Libertarianism for Beginners\">\n Libertarianism for Beginners\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £51.33\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n 
</i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n <li class=\"col-xs-6 col-sm-4 col-md-3 col-lg-3\">\n <article class=\"product_pod\">\n <div class=\"image_container\">\n <a href=\"catalogue/its-only-the-himalayas_981/index.html\">\n <img alt=\"It's Only the Himalayas\" class=\"thumbnail\" src=\"media/cache/27/a5/27a53d0bb95bdd88288eaf66c9230d7e.jpg\"/>\n </a>\n </div>\n <p class=\"star-rating Two\">\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n <i class=\"icon-star\">\n </i>\n </p>\n <h3>\n <a href=\"catalogue/its-only-the-himalayas_981/index.html\" title=\"It's Only the Himalayas\">\n It's Only the Himalayas\n </a>\n </h3>\n <div class=\"product_price\">\n <p class=\"price_color\">\n £45.17\n </p>\n <p class=\"instock availability\">\n <i class=\"icon-ok\">\n </i>\n In stock\n </p>\n <form>\n <button class=\"btn btn-primary btn-block\" data-loading-text=\"Adding...\" type=\"submit\">\n Add to basket\n </button>\n </form>\n </div>\n </article>\n </li>\n </ol>\n <div>\n <ul class=\"pager\">\n <li class=\"current\">\n Page 1 of 50\n </li>\n <li class=\"next\">\n <a href=\"catalogue/page-2.html\">\n next\n </a>\n </li>\n </ul>\n </div>\n </div>\n </section>\n </div>\n </div>\n <!-- /row -->\n </div>\n <!-- /page_inner -->\n</div>\n" ], [ "a_tags = SoupStrainer(\"a\") #Getting Only A Tags", "_____no_output_____" ], [ "soup = BeautifulSoup(resp.text,\"lxml\",parse_only = a_tags)\nprint(soup.prettify())", "<!DOCTYPE html>\n<a href=\"index.html\">\n Books to Scrape\n</a>\n<a href=\"index.html\">\n Home\n</a>\n<a href=\"catalogue/category/books_1/index.html\">\n Books\n</a>\n<a href=\"catalogue/category/books/travel_2/index.html\">\n Travel\n</a>\n<a href=\"catalogue/category/books/mystery_3/index.html\">\n Mystery\n</a>\n<a href=\"catalogue/category/books/historical-fiction_4/index.html\">\n Historical Fiction\n</a>\n<a href=\"catalogue/category/books/sequential-art_5/index.html\">\n Sequential Art\n</a>\n<a href=\"catalogue/category/books/classics_6/index.html\">\n Classics\n</a>\n<a href=\"catalogue/category/books/philosophy_7/index.html\">\n Philosophy\n</a>\n<a href=\"catalogue/category/books/romance_8/index.html\">\n Romance\n</a>\n<a href=\"catalogue/category/books/womens-fiction_9/index.html\">\n Womens Fiction\n</a>\n<a href=\"catalogue/category/books/fiction_10/index.html\">\n Fiction\n</a>\n<a href=\"catalogue/category/books/childrens_11/index.html\">\n Childrens\n</a>\n<a href=\"catalogue/category/books/religion_12/index.html\">\n Religion\n</a>\n<a href=\"catalogue/category/books/nonfiction_13/index.html\">\n Nonfiction\n</a>\n<a href=\"catalogue/category/books/music_14/index.html\">\n Music\n</a>\n<a href=\"catalogue/category/books/default_15/index.html\">\n Default\n</a>\n<a href=\"catalogue/category/books/science-fiction_16/index.html\">\n Science Fiction\n</a>\n<a href=\"catalogue/category/books/sports-and-games_17/index.html\">\n Sports and Games\n</a>\n<a href=\"catalogue/category/books/add-a-comment_18/index.html\">\n Add a comment\n</a>\n<a href=\"catalogue/category/books/fantasy_19/index.html\">\n Fantasy\n</a>\n<a href=\"catalogue/category/books/new-adult_20/index.html\">\n New Adult\n</a>\n<a href=\"catalogue/category/books/young-adult_21/index.html\">\n Young Adult\n</a>\n<a href=\"catalogue/category/books/science_22/index.html\">\n 
Science\n</a>\n<a href=\"catalogue/category/books/poetry_23/index.html\">\n Poetry\n</a>\n<a href=\"catalogue/category/books/paranormal_24/index.html\">\n Paranormal\n</a>\n<a href=\"catalogue/category/books/art_25/index.html\">\n Art\n</a>\n<a href=\"catalogue/category/books/psychology_26/index.html\">\n Psychology\n</a>\n<a href=\"catalogue/category/books/autobiography_27/index.html\">\n Autobiography\n</a>\n<a href=\"catalogue/category/books/parenting_28/index.html\">\n Parenting\n</a>\n<a href=\"catalogue/category/books/adult-fiction_29/index.html\">\n Adult Fiction\n</a>\n<a href=\"catalogue/category/books/humor_30/index.html\">\n Humor\n</a>\n<a href=\"catalogue/category/books/horror_31/index.html\">\n Horror\n</a>\n<a href=\"catalogue/category/books/history_32/index.html\">\n History\n</a>\n<a href=\"catalogue/category/books/food-and-drink_33/index.html\">\n Food and Drink\n</a>\n<a href=\"catalogue/category/books/christian-fiction_34/index.html\">\n Christian Fiction\n</a>\n<a href=\"catalogue/category/books/business_35/index.html\">\n Business\n</a>\n<a href=\"catalogue/category/books/biography_36/index.html\">\n Biography\n</a>\n<a href=\"catalogue/category/books/thriller_37/index.html\">\n Thriller\n</a>\n<a href=\"catalogue/category/books/contemporary_38/index.html\">\n Contemporary\n</a>\n<a href=\"catalogue/category/books/spirituality_39/index.html\">\n Spirituality\n</a>\n<a href=\"catalogue/category/books/academic_40/index.html\">\n Academic\n</a>\n<a href=\"catalogue/category/books/self-help_41/index.html\">\n Self Help\n</a>\n<a href=\"catalogue/category/books/historical_42/index.html\">\n Historical\n</a>\n<a href=\"catalogue/category/books/christian_43/index.html\">\n Christian\n</a>\n<a href=\"catalogue/category/books/suspense_44/index.html\">\n Suspense\n</a>\n<a href=\"catalogue/category/books/short-stories_45/index.html\">\n Short Stories\n</a>\n<a href=\"catalogue/category/books/novels_46/index.html\">\n Novels\n</a>\n<a href=\"catalogue/category/books/health_47/index.html\">\n Health\n</a>\n<a href=\"catalogue/category/books/politics_48/index.html\">\n Politics\n</a>\n<a href=\"catalogue/category/books/cultural_49/index.html\">\n Cultural\n</a>\n<a href=\"catalogue/category/books/erotica_50/index.html\">\n Erotica\n</a>\n<a href=\"catalogue/category/books/crime_51/index.html\">\n Crime\n</a>\n<a href=\"catalogue/a-light-in-the-attic_1000/index.html\">\n <img alt=\"A Light in the Attic\" class=\"thumbnail\" src=\"media/cache/2c/da/2cdad67c44b002e7ead0cc35693c0e8b.jpg\"/>\n</a>\n<a href=\"catalogue/a-light-in-the-attic_1000/index.html\" title=\"A Light in the Attic\">\n A Light in the ...\n</a>\n<a href=\"catalogue/tipping-the-velvet_999/index.html\">\n <img alt=\"Tipping the Velvet\" class=\"thumbnail\" src=\"media/cache/26/0c/260c6ae16bce31c8f8c95daddd9f4a1c.jpg\"/>\n</a>\n<a href=\"catalogue/tipping-the-velvet_999/index.html\" title=\"Tipping the Velvet\">\n Tipping the Velvet\n</a>\n<a href=\"catalogue/soumission_998/index.html\">\n <img alt=\"Soumission\" class=\"thumbnail\" src=\"media/cache/3e/ef/3eef99c9d9adef34639f510662022830.jpg\"/>\n</a>\n<a href=\"catalogue/soumission_998/index.html\" title=\"Soumission\">\n Soumission\n</a>\n<a href=\"catalogue/sharp-objects_997/index.html\">\n <img alt=\"Sharp Objects\" class=\"thumbnail\" src=\"media/cache/32/51/3251cf3a3412f53f339e42cac2134093.jpg\"/>\n</a>\n<a href=\"catalogue/sharp-objects_997/index.html\" title=\"Sharp Objects\">\n Sharp Objects\n</a>\n<a 
href=\"catalogue/sapiens-a-brief-history-of-humankind_996/index.html\">\n <img alt=\"Sapiens: A Brief History of Humankind\" class=\"thumbnail\" src=\"media/cache/be/a5/bea5697f2534a2f86a3ef27b5a8c12a6.jpg\"/>\n</a>\n<a href=\"catalogue/sapiens-a-brief-history-of-humankind_996/index.html\" title=\"Sapiens: A Brief History of Humankind\">\n Sapiens: A Brief History ...\n</a>\n<a href=\"catalogue/the-requiem-red_995/index.html\">\n <img alt=\"The Requiem Red\" class=\"thumbnail\" src=\"media/cache/68/33/68339b4c9bc034267e1da611ab3b34f8.jpg\"/>\n</a>\n<a href=\"catalogue/the-requiem-red_995/index.html\" title=\"The Requiem Red\">\n The Requiem Red\n</a>\n<a href=\"catalogue/the-dirty-little-secrets-of-getting-your-dream-job_994/index.html\">\n <img alt=\"The Dirty Little Secrets of Getting Your Dream Job\" class=\"thumbnail\" src=\"media/cache/92/27/92274a95b7c251fea59a2b8a78275ab4.jpg\"/>\n</a>\n<a href=\"catalogue/the-dirty-little-secrets-of-getting-your-dream-job_994/index.html\" title=\"The Dirty Little Secrets of Getting Your Dream Job\">\n The Dirty Little Secrets ...\n</a>\n<a href=\"catalogue/the-coming-woman-a-novel-based-on-the-life-of-the-infamous-feminist-victoria-woodhull_993/index.html\">\n <img alt=\"The Coming Woman: A Novel Based on the Life of the Infamous Feminist, Victoria Woodhull\" class=\"thumbnail\" src=\"media/cache/3d/54/3d54940e57e662c4dd1f3ff00c78cc64.jpg\"/>\n</a>\n<a href=\"catalogue/the-coming-woman-a-novel-based-on-the-life-of-the-infamous-feminist-victoria-woodhull_993/index.html\" title=\"The Coming Woman: A Novel Based on the Life of the Infamous Feminist, Victoria Woodhull\">\n The Coming Woman: A ...\n</a>\n<a href=\"catalogue/the-boys-in-the-boat-nine-americans-and-their-epic-quest-for-gold-at-the-1936-berlin-olympics_992/index.html\">\n <img alt=\"The Boys in the Boat: Nine Americans and Their Epic Quest for Gold at the 1936 Berlin Olympics\" class=\"thumbnail\" src=\"media/cache/66/88/66883b91f6804b2323c8369331cb7dd1.jpg\"/>\n</a>\n<a href=\"catalogue/the-boys-in-the-boat-nine-americans-and-their-epic-quest-for-gold-at-the-1936-berlin-olympics_992/index.html\" title=\"The Boys in the Boat: Nine Americans and Their Epic Quest for Gold at the 1936 Berlin Olympics\">\n The Boys in the ...\n</a>\n<a href=\"catalogue/the-black-maria_991/index.html\">\n <img alt=\"The Black Maria\" class=\"thumbnail\" src=\"media/cache/58/46/5846057e28022268153beff6d352b06c.jpg\"/>\n</a>\n<a href=\"catalogue/the-black-maria_991/index.html\" title=\"The Black Maria\">\n The Black Maria\n</a>\n<a href=\"catalogue/starving-hearts-triangular-trade-trilogy-1_990/index.html\">\n <img alt=\"Starving Hearts (Triangular Trade Trilogy, #1)\" class=\"thumbnail\" src=\"media/cache/be/f4/bef44da28c98f905a3ebec0b87be8530.jpg\"/>\n</a>\n<a href=\"catalogue/starving-hearts-triangular-trade-trilogy-1_990/index.html\" title=\"Starving Hearts (Triangular Trade Trilogy, #1)\">\n Starving Hearts (Triangular Trade ...\n</a>\n<a href=\"catalogue/shakespeares-sonnets_989/index.html\">\n <img alt=\"Shakespeare's Sonnets\" class=\"thumbnail\" src=\"media/cache/10/48/1048f63d3b5061cd2f424d20b3f9b666.jpg\"/>\n</a>\n<a href=\"catalogue/shakespeares-sonnets_989/index.html\" title=\"Shakespeare's Sonnets\">\n Shakespeare's Sonnets\n</a>\n<a href=\"catalogue/set-me-free_988/index.html\">\n <img alt=\"Set Me Free\" class=\"thumbnail\" src=\"media/cache/5b/88/5b88c52633f53cacf162c15f4f823153.jpg\"/>\n</a>\n<a href=\"catalogue/set-me-free_988/index.html\" title=\"Set Me Free\">\n Set Me Free\n</a>\n<a 
href=\"catalogue/scott-pilgrims-precious-little-life-scott-pilgrim-1_987/index.html\">\n <img alt=\"Scott Pilgrim's Precious Little Life (Scott Pilgrim #1)\" class=\"thumbnail\" src=\"media/cache/94/b1/94b1b8b244bce9677c2f29ccc890d4d2.jpg\"/>\n</a>\n<a href=\"catalogue/scott-pilgrims-precious-little-life-scott-pilgrim-1_987/index.html\" title=\"Scott Pilgrim's Precious Little Life (Scott Pilgrim #1)\">\n Scott Pilgrim's Precious Little ...\n</a>\n<a href=\"catalogue/rip-it-up-and-start-again_986/index.html\">\n <img alt=\"Rip it Up and Start Again\" class=\"thumbnail\" src=\"media/cache/81/c4/81c4a973364e17d01f217e1188253d5e.jpg\"/>\n</a>\n<a href=\"catalogue/rip-it-up-and-start-again_986/index.html\" title=\"Rip it Up and Start Again\">\n Rip it Up and ...\n</a>\n<a href=\"catalogue/our-band-could-be-your-life-scenes-from-the-american-indie-underground-1981-1991_985/index.html\">\n <img alt=\"Our Band Could Be Your Life: Scenes from the American Indie Underground, 1981-1991\" class=\"thumbnail\" src=\"media/cache/54/60/54607fe8945897cdcced0044103b10b6.jpg\"/>\n</a>\n<a href=\"catalogue/our-band-could-be-your-life-scenes-from-the-american-indie-underground-1981-1991_985/index.html\" title=\"Our Band Could Be Your Life: Scenes from the American Indie Underground, 1981-1991\">\n Our Band Could Be ...\n</a>\n<a href=\"catalogue/olio_984/index.html\">\n <img alt=\"Olio\" class=\"thumbnail\" src=\"media/cache/55/33/553310a7162dfbc2c6d19a84da0df9e1.jpg\"/>\n</a>\n<a href=\"catalogue/olio_984/index.html\" title=\"Olio\">\n Olio\n</a>\n<a href=\"catalogue/mesaerion-the-best-science-fiction-stories-1800-1849_983/index.html\">\n <img alt=\"Mesaerion: The Best Science Fiction Stories 1800-1849\" class=\"thumbnail\" src=\"media/cache/09/a3/09a3aef48557576e1a85ba7efea8ecb7.jpg\"/>\n</a>\n<a href=\"catalogue/mesaerion-the-best-science-fiction-stories-1800-1849_983/index.html\" title=\"Mesaerion: The Best Science Fiction Stories 1800-1849\">\n Mesaerion: The Best Science ...\n</a>\n<a href=\"catalogue/libertarianism-for-beginners_982/index.html\">\n <img alt=\"Libertarianism for Beginners\" class=\"thumbnail\" src=\"media/cache/0b/bc/0bbcd0a6f4bcd81ccb1049a52736406e.jpg\"/>\n</a>\n<a href=\"catalogue/libertarianism-for-beginners_982/index.html\" title=\"Libertarianism for Beginners\">\n Libertarianism for Beginners\n</a>\n<a href=\"catalogue/its-only-the-himalayas_981/index.html\">\n <img alt=\"It's Only the Himalayas\" class=\"thumbnail\" src=\"media/cache/27/a5/27a53d0bb95bdd88288eaf66c9230d7e.jpg\"/>\n</a>\n<a href=\"catalogue/its-only-the-himalayas_981/index.html\" title=\"It's Only the Himalayas\">\n It's Only the Himalayas\n</a>\n<a href=\"catalogue/page-2.html\">\n next\n</a>\n" ], [ "img_tags = SoupStrainer(\"img\") #Getting Only Img Tags", "_____no_output_____" ], [ "soup = BeautifulSoup(resp.text,\"lxml\",parse_only = img_tags)\nprint(soup.prettify())", "<!DOCTYPE html>\n<img alt=\"A Light in the Attic\" class=\"thumbnail\" src=\"media/cache/2c/da/2cdad67c44b002e7ead0cc35693c0e8b.jpg\"/>\n<img alt=\"Tipping the Velvet\" class=\"thumbnail\" src=\"media/cache/26/0c/260c6ae16bce31c8f8c95daddd9f4a1c.jpg\"/>\n<img alt=\"Soumission\" class=\"thumbnail\" src=\"media/cache/3e/ef/3eef99c9d9adef34639f510662022830.jpg\"/>\n<img alt=\"Sharp Objects\" class=\"thumbnail\" src=\"media/cache/32/51/3251cf3a3412f53f339e42cac2134093.jpg\"/>\n<img alt=\"Sapiens: A Brief History of Humankind\" class=\"thumbnail\" src=\"media/cache/be/a5/bea5697f2534a2f86a3ef27b5a8c12a6.jpg\"/>\n<img alt=\"The Requiem Red\" 
class=\"thumbnail\" src=\"media/cache/68/33/68339b4c9bc034267e1da611ab3b34f8.jpg\"/>\n<img alt=\"The Dirty Little Secrets of Getting Your Dream Job\" class=\"thumbnail\" src=\"media/cache/92/27/92274a95b7c251fea59a2b8a78275ab4.jpg\"/>\n<img alt=\"The Coming Woman: A Novel Based on the Life of the Infamous Feminist, Victoria Woodhull\" class=\"thumbnail\" src=\"media/cache/3d/54/3d54940e57e662c4dd1f3ff00c78cc64.jpg\"/>\n<img alt=\"The Boys in the Boat: Nine Americans and Their Epic Quest for Gold at the 1936 Berlin Olympics\" class=\"thumbnail\" src=\"media/cache/66/88/66883b91f6804b2323c8369331cb7dd1.jpg\"/>\n<img alt=\"The Black Maria\" class=\"thumbnail\" src=\"media/cache/58/46/5846057e28022268153beff6d352b06c.jpg\"/>\n<img alt=\"Starving Hearts (Triangular Trade Trilogy, #1)\" class=\"thumbnail\" src=\"media/cache/be/f4/bef44da28c98f905a3ebec0b87be8530.jpg\"/>\n<img alt=\"Shakespeare's Sonnets\" class=\"thumbnail\" src=\"media/cache/10/48/1048f63d3b5061cd2f424d20b3f9b666.jpg\"/>\n<img alt=\"Set Me Free\" class=\"thumbnail\" src=\"media/cache/5b/88/5b88c52633f53cacf162c15f4f823153.jpg\"/>\n<img alt=\"Scott Pilgrim's Precious Little Life (Scott Pilgrim #1)\" class=\"thumbnail\" src=\"media/cache/94/b1/94b1b8b244bce9677c2f29ccc890d4d2.jpg\"/>\n<img alt=\"Rip it Up and Start Again\" class=\"thumbnail\" src=\"media/cache/81/c4/81c4a973364e17d01f217e1188253d5e.jpg\"/>\n<img alt=\"Our Band Could Be Your Life: Scenes from the American Indie Underground, 1981-1991\" class=\"thumbnail\" src=\"media/cache/54/60/54607fe8945897cdcced0044103b10b6.jpg\"/>\n<img alt=\"Olio\" class=\"thumbnail\" src=\"media/cache/55/33/553310a7162dfbc2c6d19a84da0df9e1.jpg\"/>\n<img alt=\"Mesaerion: The Best Science Fiction Stories 1800-1849\" class=\"thumbnail\" src=\"media/cache/09/a3/09a3aef48557576e1a85ba7efea8ecb7.jpg\"/>\n<img alt=\"Libertarianism for Beginners\" class=\"thumbnail\" src=\"media/cache/0b/bc/0bbcd0a6f4bcd81ccb1049a52736406e.jpg\"/>\n<img alt=\"It's Only the Himalayas\" class=\"thumbnail\" src=\"media/cache/27/a5/27a53d0bb95bdd88288eaf66c9230d7e.jpg\"/>\n\n" ], [ "alt_attr = SoupStrainer(alt=\"It's Only the Himalayas\")", "_____no_output_____" ], [ "soup = BeautifulSoup(resp.text,\"lxml\",parse_only = alt_attr)\nprint(soup.prettify())", "<!DOCTYPE html>\n<img alt=\"It's Only the Himalayas\" class=\"thumbnail\" src=\"media/cache/27/a5/27a53d0bb95bdd88288eaf66c9230d7e.jpg\"/>\n\n" ], [ "index_html_only = SoupStrainer(href = re.compile('book'))", "_____no_output_____" ], [ "soup = BeautifulSoup(resp.text,\"lxml\",parse_only = index_html_only)\nprint(soup.prettify())", "<!DOCTYPE html>\n<a href=\"catalogue/category/books_1/index.html\">\n Books\n</a>\n<a href=\"catalogue/category/books/travel_2/index.html\">\n Travel\n</a>\n<a href=\"catalogue/category/books/mystery_3/index.html\">\n Mystery\n</a>\n<a href=\"catalogue/category/books/historical-fiction_4/index.html\">\n Historical Fiction\n</a>\n<a href=\"catalogue/category/books/sequential-art_5/index.html\">\n Sequential Art\n</a>\n<a href=\"catalogue/category/books/classics_6/index.html\">\n Classics\n</a>\n<a href=\"catalogue/category/books/philosophy_7/index.html\">\n Philosophy\n</a>\n<a href=\"catalogue/category/books/romance_8/index.html\">\n Romance\n</a>\n<a href=\"catalogue/category/books/womens-fiction_9/index.html\">\n Womens Fiction\n</a>\n<a href=\"catalogue/category/books/fiction_10/index.html\">\n Fiction\n</a>\n<a href=\"catalogue/category/books/childrens_11/index.html\">\n Childrens\n</a>\n<a 
href=\"catalogue/category/books/religion_12/index.html\">\n Religion\n</a>\n<a href=\"catalogue/category/books/nonfiction_13/index.html\">\n Nonfiction\n</a>\n<a href=\"catalogue/category/books/music_14/index.html\">\n Music\n</a>\n<a href=\"catalogue/category/books/default_15/index.html\">\n Default\n</a>\n<a href=\"catalogue/category/books/science-fiction_16/index.html\">\n Science Fiction\n</a>\n<a href=\"catalogue/category/books/sports-and-games_17/index.html\">\n Sports and Games\n</a>\n<a href=\"catalogue/category/books/add-a-comment_18/index.html\">\n Add a comment\n</a>\n<a href=\"catalogue/category/books/fantasy_19/index.html\">\n Fantasy\n</a>\n<a href=\"catalogue/category/books/new-adult_20/index.html\">\n New Adult\n</a>\n<a href=\"catalogue/category/books/young-adult_21/index.html\">\n Young Adult\n</a>\n<a href=\"catalogue/category/books/science_22/index.html\">\n Science\n</a>\n<a href=\"catalogue/category/books/poetry_23/index.html\">\n Poetry\n</a>\n<a href=\"catalogue/category/books/paranormal_24/index.html\">\n Paranormal\n</a>\n<a href=\"catalogue/category/books/art_25/index.html\">\n Art\n</a>\n<a href=\"catalogue/category/books/psychology_26/index.html\">\n Psychology\n</a>\n<a href=\"catalogue/category/books/autobiography_27/index.html\">\n Autobiography\n</a>\n<a href=\"catalogue/category/books/parenting_28/index.html\">\n Parenting\n</a>\n<a href=\"catalogue/category/books/adult-fiction_29/index.html\">\n Adult Fiction\n</a>\n<a href=\"catalogue/category/books/humor_30/index.html\">\n Humor\n</a>\n<a href=\"catalogue/category/books/horror_31/index.html\">\n Horror\n</a>\n<a href=\"catalogue/category/books/history_32/index.html\">\n History\n</a>\n<a href=\"catalogue/category/books/food-and-drink_33/index.html\">\n Food and Drink\n</a>\n<a href=\"catalogue/category/books/christian-fiction_34/index.html\">\n Christian Fiction\n</a>\n<a href=\"catalogue/category/books/business_35/index.html\">\n Business\n</a>\n<a href=\"catalogue/category/books/biography_36/index.html\">\n Biography\n</a>\n<a href=\"catalogue/category/books/thriller_37/index.html\">\n Thriller\n</a>\n<a href=\"catalogue/category/books/contemporary_38/index.html\">\n Contemporary\n</a>\n<a href=\"catalogue/category/books/spirituality_39/index.html\">\n Spirituality\n</a>\n<a href=\"catalogue/category/books/academic_40/index.html\">\n Academic\n</a>\n<a href=\"catalogue/category/books/self-help_41/index.html\">\n Self Help\n</a>\n<a href=\"catalogue/category/books/historical_42/index.html\">\n Historical\n</a>\n<a href=\"catalogue/category/books/christian_43/index.html\">\n Christian\n</a>\n<a href=\"catalogue/category/books/suspense_44/index.html\">\n Suspense\n</a>\n<a href=\"catalogue/category/books/short-stories_45/index.html\">\n Short Stories\n</a>\n<a href=\"catalogue/category/books/novels_46/index.html\">\n Novels\n</a>\n<a href=\"catalogue/category/books/health_47/index.html\">\n Health\n</a>\n<a href=\"catalogue/category/books/politics_48/index.html\">\n Politics\n</a>\n<a href=\"catalogue/category/books/cultural_49/index.html\">\n Cultural\n</a>\n<a href=\"catalogue/category/books/erotica_50/index.html\">\n Erotica\n</a>\n<a href=\"catalogue/category/books/crime_51/index.html\">\n Crime\n</a>\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a7962044230a302674172743ff2b72a944d45a9
263,471
ipynb
Jupyter Notebook
notebooks/parametric sparse GP recovery.ipynb
davmre/sigvisa
91a1f163b8f3a258dfb78d88a07f2a11da41bd04
[ "BSD-3-Clause" ]
null
null
null
notebooks/parametric sparse GP recovery.ipynb
davmre/sigvisa
91a1f163b8f3a258dfb78d88a07f2a11da41bd04
[ "BSD-3-Clause" ]
null
null
null
notebooks/parametric sparse GP recovery.ipynb
davmre/sigvisa
91a1f163b8f3a258dfb78d88a07f2a11da41bd04
[ "BSD-3-Clause" ]
null
null
null
497.115094
50,934
0.920511
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
4a796afc8f0ed0f94e1dd0e15c94f9875fb878ff
230,443
ipynb
Jupyter Notebook
.ipynb_checkpoints/Jupyter_metagenomic_analysis-checkpoint.ipynb
EnriqueDoster/MEG_R_metagenomic_analysis
b801f7d4fc49215b85a5bef187c697bc334d8b0a
[ "MIT" ]
null
null
null
.ipynb_checkpoints/Jupyter_metagenomic_analysis-checkpoint.ipynb
EnriqueDoster/MEG_R_metagenomic_analysis
b801f7d4fc49215b85a5bef187c697bc334d8b0a
[ "MIT" ]
null
null
null
.ipynb_checkpoints/Jupyter_metagenomic_analysis-checkpoint.ipynb
EnriqueDoster/MEG_R_metagenomic_analysis
b801f7d4fc49215b85a5bef187c697bc334d8b0a
[ "MIT" ]
3
2020-02-23T20:45:20.000Z
2021-08-25T16:14:04.000Z
89.841326
102,006
0.73028
[ [ [ "# Bioinfomatic central script\n\n", "_____no_output_____" ] ], [ [ "# Source the utility functions file, which should be in the scripts folder with this file\nsource('scripts/meg_utility_functions.R')\nsource('scripts/load_libraries.R')", "_____no_output_____" ] ], [ [ "## USER Controls\n\n\nFirst, we'll need to specify the location of important files on your machine. \n \n You'll need to input files associated with the microbiome and resistome seperately. This allows for the option of including microbiome results from qiime2 or kraken2. \n \n For the resistome:\n > Metadata file for all resistome samples (.csv)\n > Megares annotation file (.csv)\n > Count table results from the AMRplusplus pipeline (.csv)\n For the microbiome\n > Metadata file for all microbiome samples (.tsv)\n > etc..\n ", "_____no_output_____" ] ], [ [ "# In which column of the metadata file are the sample IDs stored?\nsample_column_id = 'ID'\n# Set the output directory for graphs:\ngraph_output_dir = 'graphs'\n# Set the output directory for statistics:\nstats_output_dir = 'stats'", "_____no_output_____" ] ], [ [ "# For the resistome:", "_____no_output_____" ] ], [ [ "# Load the data, MEGARes annotations, and metadata\namr_count_matrix_filepath = 'data/test_data/strict_SNP_confirmed_AMR_analytic_matrix.csv'\n# Where is the metadata file stored on your machine?\namr_metadata_filepath = 'data/test_data/FC_meat_AMR_metadata.csv'\n# Name of the megares annotation file used for this project\nmegares_annotation_filename = 'data/amr/megares_annotations_v1.03.csv'", "_____no_output_____" ] ], [ [ "# For the microbiome:", "_____no_output_____" ] ], [ [ "##### Two options for microbiome analysis, either 16S (below) or shotgun (I'll add)\nmicrobiome_temp_metadata_file <- \"data/test_data/FC_meat_metadata.csv\"\n# Now, specify file location for 16S\nbiom_file <- \"data/test_data/exported-biom-table/otu_table_json.biom\"\ntre_file <- \"data/test_data/exported-tree/tree.nwk\"\ntax_fasta <- \"data/test_data/exported-rep-seqs/dna-sequences.fasta\" #https://data.qiime2.org/2017.6/tutorials/training-feature-classifiers/85_otus.fasta\ntaxa_file <- \"data/test_data/exported-biom-table-taxa/taxonomy.tsv\" #https://data.qiime2.org/2017.6/tutorials/training-feature-classifiers/85_otu_taxonomy.txt\n\n### or Shotgun analysis \n#microbiome_temp_metadata_file = \"../FC_meat_metadata.csv\"\n#kraken_temp_file <- read.table('microbiome_analytic_matrix.csv', header=T, row.names=1, sep=',')", "_____no_output_____" ] ], [ [ "# Next, we have to specify which variables you want to create exploratory graphs with\n\nWe should try to make this a click through option. And some users might not need both the AMR and microbiome analyses.", "_____no_output_____" ] ], [ [ "# The following is a list of analyses based on variables in \n# your metadata.csv file that you want\n# to use for EXPLORATORY analysis (NMDS, PCA, alpha rarefaction, barplots)\n# NOTE: Exploratory variables cannot be numeric. 
\n\nAMR_exploratory_analyses = list(\n # Analysis Store\n # Description: \n list(\n name = 'Store',\n subsets = list(),\n exploratory_var = 'Blinded_Store',\n order = ''\n ), \n # Analysis Dilution\n # Description: \n list(\n name = 'Dilution',\n subsets = list(),\n exploratory_var = 'Dilution',\n order = ''\n ),\n # Analysis 2\n # Description:\n list(\n name = 'Treatment',\n subsets = list(),\n exploratory_var = 'Treatment',\n order = ''\n ),\n # Analysis 3\n # Description:\n list(\n name = 'Packaging',\n subsets = list(),\n exploratory_var = 'Packaging',\n order = ''\n )\n)\n\n\n\nmicrobiome_exploratory_analyses = list(\n # Analysis Store\n # Description: \n list(\n name = 'Store',\n subsets = list(),\n exploratory_var = 'Blinded_Store',\n order = ''\n ), \n # Analysis ID\n # Description: \n list(\n name = 'ID',\n subsets = list(),\n exploratory_var = 'ID',\n order = ''\n ),\n # Analysis 2\n # Description:\n list(\n name = 'Treatment',\n subsets = list(),\n exploratory_var = 'Treatment',\n order = ''\n ),\n # Analysis 3\n # Description:\n list(\n name = 'Packaging',\n subsets = list(),\n exploratory_var = 'Packaging',\n order = ''\n )\n)\n\n", "_____no_output_____" ] ], [ [ "# Zero-inflated Gaussian model", "_____no_output_____" ] ], [ [ "# Each analyses you wish to perform should have its own list in the following\n# statistical_analyses list. A template is provided to get you started.\n# Multiple analyses, subsets, and contrasts are valid, but only one random\n# effect can be used per analysis. The contrasts of interest must have their\n# parent variable in the model matrix equation. Contrasts are named by\n# parent variable then child variable without a space inbetween, for example:\n# PVar1Cvar1 where the model matrix equation is ~ 0 + Pvar1.\nAMR_statistical_analyses = list(\n # Analysis 1\n # Description: \n list(\n name = 'Treatment',\n subsets = list(),\n model_matrix = '~ 0 + Treatment ',\n contrasts = list('TreatmentCONV - TreatmentRWA'),\n random_effect = NA\n )\n)\n\nmicrobiome_statistical_analyses = list(\n # Analysis 1\n # Description: \n list(\n name = 'Treatment',\n subsets = list(),\n model_matrix = '~ 0 + Treatment ',\n contrasts = list('TreatmentCONV - TreatmentRWA'),\n random_effect = NA\n )\n)\n", "_____no_output_____" ] ], [ [ "# Run main script to get convenient R objects for further analysis\n\n* You have to select which script you need to run, based on what data you are providing. In the example data provided, we used qiime2 for microbiome analysis and AMR++ with the megares database. 
Therefore, we will run this script:\n * source('scripts/metagenomeSeq_megares_qiime.R')\n* This is the other option (more in development):\n * source('scripts/metagenomeSeq_megares_kraken.R')\n \n### After running the next code block, you can explore your data using the following R objects\n* AMR\n  * amr_melted_analytic/amr_raw_melted_analytic\n    * Object of all counts in long form\n  * AMR_analytic_data\n    * List of MRexperiment objects at each level; Class, Mechanism, Group, Gene\n* Microbiome\n  * microbiome_melted_analytic/microbiome_raw_melted_analytic\n  * microbiome_analytic_data", "_____no_output_____" ] ], [ [ "#### If 16S microbiome and megares analysis, run:\nsource('scripts/metagenomeSeq_megares_qiime.R')", "Default value being used.\nDefault value being used.\nWarning message in melt(D, variable.name = \"Sample\", value.name = \"Normalized_Count\"):\n“The melt generic in data.table has been passed a matrix and will attempt to redirect to the relevant reshape2 method; please note that reshape2 is deprecated, and this redirection is now deprecated as well. To continue using melt methods from reshape2 while both libraries are attached, e.g. melt.list, you can prepend the namespace like reshape2::melt(D). In the next version, this warning will become an error.”" ] ], [ [ "# Print exploratory figures\n* ## Don't use these figures for publication unless you fully understand how the functions in the script, \"meg_utility_functions.R\", process your data.\n", "_____no_output_____" ] ], [ [ "## Run code to make some exploratory figures, zero inflated gaussian model, and output count matrices.\nsuppressMessages(source('scripts/print_figures.R'))", "Warning message in check_pal_n(n, max_n):\n“This palette can handle a maximum of 20 values.You have supplied 32.”" ] ], [ [ "# Everything after this is where we can get creative to summarize our results. 
\n## Area to show them how to play around with ggplot2\n \n\nFirst, combine the normalized count tables with the metadata file.", "_____no_output_____" ] ], [ [ "head(amr_melted_analytic)", "_____no_output_____" ], [ "### Start of code for figures, combine table objects to include meta\nsetkey(amr_melted_raw_analytic,ID) \nsetkey(amr_melted_analytic,ID) \n\nsetkey(microbiome_melted_analytic,ID)\n# Set keys for both metadata files\nsetkey(metadata,ID)\nsetkey(microbiome_metadata,ID)\nmicrobiome_melted_analytic <- microbiome_melted_analytic[microbiome_metadata]\namr_melted_raw_analytic <- amr_melted_raw_analytic[metadata]\namr_melted_analytic <- amr_melted_analytic[metadata]", "_____no_output_____" ], [ "head(amr_melted_analytic)", "_____no_output_____" ] ], [ [ "# Create plots below", "_____no_output_____" ] ], [ [ "## Figure showing resistome composition\nAMR_class_sum <- amr_melted_analytic[Level_ID==\"Class\", .(sum_class= sum(Normalized_Count)),by=.(ID, Name, Packaging, Treatment)][order(-Packaging )]\nAMR_class_sum[,total:= sum(sum_class), by=.(ID)]\nAMR_class_sum[,percentage:= sum_class/total ,by=.(ID, Name) ]\nAMR_class_sum$Class <- AMR_class_sum$Name\nfig1 <- ggplot(AMR_class_sum, aes(x = ID, y = percentage, fill = Class)) + \n geom_bar(stat = \"identity\",colour = \"black\")+\n facet_wrap( ~ Treatment, scales='free',ncol = 2) +\n theme(\n panel.grid.major=element_blank(),\n panel.grid.minor=element_blank(),\n strip.text.x=element_text(size=22),\n strip.text.y=element_text(size=22, angle=0),\n axis.text.x=element_blank(),\n axis.text.y=element_text(size=20),\n axis.title=element_text(size=22),\n legend.position=\"right\",\n panel.spacing=unit(0.1, \"lines\"),\n plot.title=element_text(size=22, hjust=0.5),\n legend.text=element_text(size=10),\n legend.title=element_text(size=20),\n panel.background = element_rect(fill = \"white\")\n ) +\n ggtitle(\"\\t\\tResistome composition by sample\") +\n xlab('Sample') +\n ylab('Relative abundance') +\n scale_fill_tableau(\"Tableau 20\") \nfig1", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
4a797cc90a21a7ef46868a72c422cc7b6097c009
22,141
ipynb
Jupyter Notebook
Tutorial-Metadaten/structureData_task.ipynb
Fuenfgeld/2022TeamADataManagementBC
a7e8c1809197bd8d9f67edc1cb41f3e2ae7d5422
[ "MIT" ]
null
null
null
Tutorial-Metadaten/structureData_task.ipynb
Fuenfgeld/2022TeamADataManagementBC
a7e8c1809197bd8d9f67edc1cb41f3e2ae7d5422
[ "MIT" ]
null
null
null
Tutorial-Metadaten/structureData_task.ipynb
Fuenfgeld/2022TeamADataManagementBC
a7e8c1809197bd8d9f67edc1cb41f3e2ae7d5422
[ "MIT" ]
null
null
null
33.34488
693
0.551737
[ [ [ "<a href=\"https://colab.research.google.com/github/Fuenfgeld/2022TeamADataManagementBC/blob/main/Tutorial-Metadaten/structureData_task.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Structural Data and Metadata Schemas\n\n#### REFERENCE MODEL FOR AN OPEN ARCHIVAL INFORMATION SYSTEM (OAIS)\n\n![REFERENCE MODEL FOR AN\nOPEN ARCHIVAL\nINFORMATION SYSTEM (OAIS)](data/OASI_chard.jpg)", "_____no_output_____" ], [ "Structure Information, definition by the [Open Archival Information System](https://public.ccsds.org/pubs/650x0m2.pdf):\n\n- It does this by **describing the format**, or data structure concepts, which are to be applied to the bit sequences and that in turn result in more meaningful values such as characters, numbers, pixels, arrays, tables, etc.", "_____no_output_____" ], [ "- These common computer **data types**, **aggregations** of these data types, and **mapping rules** which map from the underlying data types to the higher level concepts needed to understand the Digital Object are referred to as the Structure Information of the Representation Information object.", "_____no_output_____" ], [ "Example: \n- A reference to the ASCII standard (ISO 9660) to interpret bits as characters.\n- A reference to ISO/TS 22028-4 (Digital images encoded using eciRGB) to interpret bits as images.", "_____no_output_____" ], [ "## import required libraries", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "## Data exploration\n\nAs mentioned, the original data can be found at https://www.kaggle.com/datasets/sulianova/cardiovascular-disease-dataset.\n\nFeatures:\n\n- Age | Objective Feature | age | int (days)\n- Height | Objective Feature | height | int (cm) |\n- Weight | Objective Feature | weight | float (kg) |\n- Systolic blood pressure | Examination Feature | ap_hi | int |\n- Diastolic blood pressure | Examination Feature | ap_lo | int |\n- Smoking | Subjective Feature | smoke | 0-2 |\n- Presence or absence of cardiovascular disease | Target Variable | cardio | 0-9 |", "_____no_output_____" ] ], [ [ "df = pd.read_csv(\"./data/new_data.csv\", sep = ',', index_col = 0)", "_____no_output_____" ], [ "df.head(5)", "_____no_output_____" ] ], [ [ "## What do these data mean?\n\n![What do these data mean?](data/metadata_question.jpg)", "_____no_output_____" ], [ "![Questions?](data/question.jpg)", "_____no_output_____" ], [ "### Metadata schemas\n- There are many kinds of metadata standards/schemas (generic/domain-specific).\n- Generic ones: [Dublin Core](https://www.dublincore.org/), [MODS](http://format.gbv.de/mods) (Metadata Object Description Schema) are usually easy to use and widely adopted, but often have to be extended to cover more specific information.\n- Domain-specific schemas: Have a much richer vocabulary and a much more extensive structure, but are usually highly specialized and only understandable to researchers in that field. [Examples here](https://fairsharing.org/search?fairsharingRegistry=Standard); see the sketch below for how a generic record can look in practice.
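To make the schema discussion concrete before turning to Dublin Core in detail, here is a minimal sketch of such a generic record in Python. Every field value is an illustrative assumption about the cardio dataset (title, creator, date and rights are guesses inferred from the Kaggle page), not authoritative metadata:

```python
# Minimal, hypothetical Dublin Core-style record for the cardio dataset.
# All values below are illustrative assumptions, not authoritative metadata.
dc_record = {
    "title": "Cardiovascular Disease dataset (modified copy: new_data.csv)",
    "creator": "Kaggle user sulianova",            # assumed from the dataset URL
    "subject": "cardiology; risk factors; tabular data",
    "description": "Patient records with objective, examination and subjective features.",
    "date": "2019-01-20",                          # hypothetical date, W3CDTF format
    "format": "text/csv",                          # IANA media type
    "identifier": "https://www.kaggle.com/datasets/sulianova/cardiovascular-disease-dataset",
    "language": "en",                              # ISO 639 code
    "rights": "see the dataset license on Kaggle", # placeholder
}

# Print the record in a dc:element style for a quick visual check.
for element, value in dc_record.items():
    print(f"dc:{element}: {value}")
```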
", "_____no_output_____" ], [ "## [Dublin Core](https://www.dublincore.org/specifications/dublin-core/dcmi-terms/#section-1)\n\n- Dublin Core goes back to the so-called Dublin Core Metadata Initiative (DCMI)\n- Founded in Chicago in 1994\n- In 1995, uniform standards for marking up metadata were defined in Dublin.\n- Goal: make it easier for search engines to search documents by storing important content directly in the metadata. \n- Used wherever search engines are employed: the internet, libraries, administrations or museums.\n- Today the standards are maintained by a group of volunteers. ", "_____no_output_____" ], [ "### Breakdown into 15 core elements [here](https://www.dublincore.org/specifications/dublin-core/dcmi-terms/):\n- contributor\n- coverage\n- creator\n- date\n- description\n- format\n- identifier\n- language\n- publisher\n- relation\n- rights\n- source\n- subject\n- title\n- type", "_____no_output_____" ], [ "- **contributor** *WHO?*: <br>\nNames the person(s) or organization(s) who contributed to the creation of the resource (content).\n \n- **coverage (place and time)** *WHERE/WHEN?*: <br>\nThis is where information about the [place](http://www.getty.edu/research/tools/vocabularies/tgn/?find=&place=Heidelberg&nation=&prev_page=1&english=Y&popup=P) and the temporal scope of validity is stored. Valid names should be used for places, and time intervals (e.g. 07.07 - 12.07) for the temporal extent.\n \n- **creator** *WHO?*: <br>\nNames the original author of a resource. Authors can be person(s) or organization(s).", "_____no_output_____" ], [ "- **[date](https://www.w3.org/TR/NOTE-datetime)** *WHEN?*:<br>\nStores information about the creation date, modification date, embargo period and deletion date.\n \n- **description** *WHY/WHAT?*:<br>\nAdditional information that describes the resource in more detail, for example an abstract or a table of contents.\n \n- **format** *WHAT/HOW?*:<br>\nInformation about the [MIME type](https://www.iana.org/assignments/media-types/media-types.xhtml) of the resource, such as pixel size, file format, duration, etc.", "_____no_output_____" ], [ "- **identifier** *WHAT?*: <br>\nThis element contains an unambiguous identifier for the resource, e.g. a URL ([DOI](https://www.doi.org/)), article number or UID.\n \n- **language** *WHAT/HOW?*:<br>\nStores a language code. Language codes according to [ISO 639](https://www.loc.gov/standards/iso639-2/php/code_list.php) or RFC 3066 should be used.\n \n- **publisher** *WHO?*: <br>\nContains information about the publisher. The publisher can be person(s) or organization(s).", "_____no_output_____" ], [ "- **relation** *WHAT?*:<br>\nRecords information about relationships to other resources.\n \n- **rights** *WHO/WHERE?*: <br>\nThis is where information about the rights to resources is stored, for example about the author or the [license type](https://opensource.org/licenses) (GPL, LGPL, ZPL, etc.).\n \n- **source** *WHAT?*: <br>\nA related resource from which the described resource is derived. 
The described resource may be derived from the related resource in whole or in part.", "_____no_output_____" ], [ " \n- **subject (keywords)** *WHAT?*:<br>\nKeywords or whole identifying phrases for a resource can be stored here.\n \n- **title** *WHAT?*:<br>\nStores the title of the resource (e.g. the document title).\n \n- **[type](https://www.dublincore.org/specifications/dublin-core/dcmi-terms/#section-7)** *WHAT/HOW?*:<br>\nThe type assigns a media category such as image, article, folder, etc. to a resource.", "_____no_output_____" ], [ "### Task\n\nSummarize the still missing \"core elements\" for the presented dataset, using the linked encoding standards. Additional information about the dataset can be found [here](https://github.com/Fuenfgeld/2022TeamADataManagementBC/wiki/3.-Datensatz#zus%C3%A4tzliche-beschreibung).\n\nExample encoding standards:\n- [Thesaurus of Geographic Names (TGN)](http://www.getty.edu/research/tools/vocabularies/tgn/?find=&place=Heidelberg&nation=&prev_page=1&english=Y&popup=P)\n\n- [Date and Time Formats](https://www.w3.org/TR/NOTE-datetime)\n\n- [Media types](https://www.iana.org/assignments/media-types/media-types.xhtml)\n\n- [Codes for the Representation of Names of Languages (ISO 639-2)](https://www.loc.gov/standards/iso639-2/php/code_list.php)\n\n- [List of popular Licenses](https://opensource.org/licenses)", "_____no_output_____" ], [ "### XML schema\n\n- [Guidelines](https://www.dublincore.org/specifications/dublin-core/dc-xml-guidelines/2003-04-02/)\n- Difference between **Simple** and **Qualified** Dublin Core", "_____no_output_____" ], [ "#### Simple Dublin Core\n\n- Consists of one or more properties and their associated values.\n- Each property is an attribute of the described resource.\n- Each property must be one of the 15 DCMES [DCMES] elements.\n- Properties may be repeated.\n- Each value is a string.\n- Each string value may have an associated language (e.g. en-GB).\n", "_____no_output_____" ], [ "```xml\n<?xml version=\"1.0\"?>\n\n<metadata\n xmlns=\"http://example.org/myapp/\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://example.org/myapp/ http://example.org/myapp/schema.xsd\"\n xmlns:dc=\"http://purl.org/dc/elements/1.1/\">\n\n <dc:title>\n UKOLN\n </dc:title>\n <dc:description>\n UKOLN is a national focus of expertise in digital information\n management. It provides policy, research and awareness services\n to the UK library, information and cultural heritage communities.\n UKOLN is based at the University of Bath.\n </dc:description>\n <dc:publisher>\n UKOLN, University of Bath\n </dc:publisher>\n <dc:identifier>\n http://www.ukoln.ac.uk/\n </dc:identifier>\n\n</metadata>\n```", "_____no_output_____" ], [ "#### Qualified Dublin Core\n\n\n- Consists of one or more properties and their associated values. **✓**\n- Each property is an attribute of the described resource. **✓**\n- Each property must be either:\n - one of the 15 DC elements, **✓**\n - one of the other elements recommended by DCMI (e.g. audience) [DCTERMS],\n - one of the element refinements listed in the DCMI Metadata Terms recommendation [DCTERMS].\n- Properties may be repeated. **✓**\n- Each value is a string. 
**✓**\n- Each value may have an associated encoding scheme.\n- Each encoding scheme has a name.\n- Each string value may have an associated language (e.g. en-GB). **✓**\n", "_____no_output_____" ], [ "```xml\n<?xml version=\"1.0\"?>\n\n<metadata\n xmlns=\"http://example.org/myapp/\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://example.org/myapp/ http://example.org/myapp/schema.xsd\"\n xmlns:dc=\"http://purl.org/dc/elements/1.1/\"\n xmlns:dcterms=\"http://purl.org/dc/terms/\">\n\n <dc:title>\n UKOLN\n </dc:title>\n <dcterms:alternative>\n UK Office for Library and Information Networking\n </dcterms:alternative>\n <dc:subject>\n national centre, network information support, library\n community, awareness, research, information services,public\n library networking, bibliographic management, distributed\n library systems, metadata, resource discovery,\n conferences,lectures, workshops\n </dc:subject>\n <dc:subject xsi:type=\"dcterms:DDC\">\n 062\n </dc:subject>\n <dc:subject xsi:type=\"dcterms:UDC\">\n 061(410)\n </dc:subject>\n <dc:description>\n UKOLN is a national focus of expertise in digital information\n management. It provides policy, research and awareness services\n to the UK library, information and cultural heritage communities.\n UKOLN is based at the University of Bath.\n </dc:description>\n <dc:description xml:lang=\"fr\">\n UKOLN est un centre national d'expertise dans la gestion de l'information\n digitale.\n </dc:description>\n <dc:publisher>\n UKOLN, University of Bath\n </dc:publisher>\n <dcterms:isPartOf xsi:type=\"dcterms:URI\">\n http://www.bath.ac.uk/\n </dcterms:isPartOf>\n <dc:identifier xsi:type=\"dcterms:URI\">\n http://www.ukoln.ac.uk/\n </dc:identifier>\n <dcterms:modified xsi:type=\"dcterms:W3CDTF\">\n 2001-07-18\n </dcterms:modified>\n <dc:format xsi:type=\"dcterms:IMT\">\n text/html\n </dc:format>\n <dcterms:extent>\n 14 Kbytes\n </dcterms:extent>\n\n</metadata>\n```", "_____no_output_____" ] ] ]
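Once a record like the qualified example above is saved to disk, it can be read back programmatically. The following is a small sketch using only the Python standard library; the file path `data/ukoln_qualified_dc.xml` is a hypothetical name for wherever you store the snippet:

```python
import xml.etree.ElementTree as ET

# Namespaces used in the qualified Dublin Core example above.
NS = {
    "dc": "http://purl.org/dc/elements/1.1/",
    "dcterms": "http://purl.org/dc/terms/",
}

# Hypothetical path: save the XML snippet above under this name first.
root = ET.parse("data/ukoln_qualified_dc.xml").getroot()

title = root.findtext("dc:title", namespaces=NS).strip()
modified = root.findtext("dcterms:modified", namespaces=NS).strip()
subjects = [s.text.strip() for s in root.findall("dc:subject", NS)]

print(title)          # UKOLN
print(modified)       # 2001-07-18
print(len(subjects))  # 3 subject elements: one free-text, two with encoding schemes
```

Because the encoding scheme is carried in the `xsi:type` attribute, a consumer can also read `s.get('{http://www.w3.org/2001/XMLSchema-instance}type')` to distinguish the DDC and UDC subjects from the free-text one.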
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
4a797d245c9692ccaaad9526c33f8b2409077bec
297,741
ipynb
Jupyter Notebook
H1N1andSeasonalFluVaccines_Categorical.ipynb
DAndresSanchez/H1N1andSeasonalFluVaccines
e28e24269b78dc8372af8a2133acb727768e7955
[ "MIT" ]
1
2020-09-30T15:36:26.000Z
2020-09-30T15:36:26.000Z
H1N1andSeasonalFluVaccines_Categorical.ipynb
DAndresSanchez/H1N1andSeasonalFluVaccines
e28e24269b78dc8372af8a2133acb727768e7955
[ "MIT" ]
null
null
null
H1N1andSeasonalFluVaccines_Categorical.ipynb
DAndresSanchez/H1N1andSeasonalFluVaccines
e28e24269b78dc8372af8a2133acb727768e7955
[ "MIT" ]
null
null
null
71.520778
86,056
0.630689
[ [ [ "# Predict H1N1 and Seasonal Flu Vaccines", "_____no_output_____" ], [ "## Preprocessing", "_____no_output_____" ], [ "### Import libraries", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\n%matplotlib inline\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "### Import data", "_____no_output_____" ] ], [ [ "features_raw_df = pd.read_csv(\"data/training_set_features.csv\", index_col=\"respondent_id\")\nlabels_raw_df = pd.read_csv(\"data/training_set_labels.csv\", index_col=\"respondent_id\")", "_____no_output_____" ], [ "print(\"features_raw_df.shape\", features_raw_df.shape)\nfeatures_raw_df.head()", "features_raw_df.shape (26707, 35)\n" ], [ "features_raw_df.dtypes", "_____no_output_____" ], [ "print(\"labels_raw_df.shape\", labels_raw_df.shape)\nlabels_raw_df.head()", "labels_raw_df.shape (26707, 2)\n" ], [ "labels_raw_df.dtypes", "_____no_output_____" ], [ "features_df = features_raw_df.copy()\nlabels_df = labels_raw_df.copy()", "_____no_output_____" ] ], [ [ "### Exploratory Data Analysis", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(2, 1, sharex=True)\n\nn_entries = labels_df.shape[0]\n\n(labels_df['h1n1_vaccine'].value_counts().div(n_entries)\n .plot.barh(title=\"Proportion of H1N1 Vaccine\", ax=ax[0]))\nax[0].set_ylabel(\"h1n1_vaccine\")\n\n(labels_df['seasonal_vaccine'].value_counts().div(n_entries)\n .plot.barh(title=\"Proportion of Seasonal Vaccine\", ax=ax[1]))\nax[1].set_ylabel(\"seasonal_vaccine\")\n\nfig.tight_layout()", "_____no_output_____" ], [ "pd.crosstab(\n labels_df[\"h1n1_vaccine\"], \n labels_df[\"seasonal_vaccine\"], \n margins=True,\n normalize=True\n)", "_____no_output_____" ], [ "(labels_df[\"h1n1_vaccine\"]\n .corr(labels_df[\"seasonal_vaccine\"], method=\"pearson\")\n)", "_____no_output_____" ] ], [ [ "### Features", "_____no_output_____" ] ], [ [ "df = features_df.join(labels_df)\nprint(df.shape)\ndf.head()", "(26707, 37)\n" ], [ "h1n1_concern_vaccine = df[['h1n1_concern', 'h1n1_vaccine']].groupby(['h1n1_concern', 'h1n1_vaccine']).size().unstack()\nh1n1_concern_vaccine", "_____no_output_____" ], [ "ax = h1n1_concern_vaccine.plot.barh()\nax.invert_yaxis()", "_____no_output_____" ], [ "h1n1_concern_counts = h1n1_concern_vaccine.sum(axis='columns')\nh1n1_concern_counts", "_____no_output_____" ], [ "h1n1_concern_vaccine_prop = h1n1_concern_vaccine.div(h1n1_concern_counts, axis='index')\nh1n1_concern_vaccine_prop", "_____no_output_____" ], [ "ax = h1n1_concern_vaccine_prop.plot.barh(stacked=True)\nax.invert_yaxis()\nax.legend(loc='center left', bbox_to_anchor=(1.05, 0.5), title='h1n1_vaccine')\nplt.show()", "_____no_output_____" ], [ "def vaccination_rate_plot(vaccine, feature, df, ax=None):\n\n feature_vaccine = df[[feature, vaccine]].groupby([feature, vaccine]).size().unstack()\n counts = feature_vaccine.sum(axis='columns')\n proportions = feature_vaccine.div(counts, axis='index')\n\n ax = proportions.plot.barh(stacked=True, ax=ax)\n ax.invert_yaxis()\n ax.legend(loc='center left', bbox_to_anchor=(1.05, 0.5), title=vaccine)\n ax.legend().remove()", "_____no_output_____" ], [ "vaccination_rate_plot('seasonal_vaccine', 'h1n1_concern', df)", "_____no_output_____" ], [ "cols_to_plot = [\n 'h1n1_concern',\n 'h1n1_knowledge',\n 'opinion_h1n1_vacc_effective',\n 'opinion_h1n1_risk',\n 'opinion_h1n1_sick_from_vacc',\n 'opinion_seas_vacc_effective',\n 'opinion_seas_risk',\n 'opinion_seas_sick_from_vacc',\n 'sex',\n 'age_group',\n 'race',\n]\n\nfig, ax = plt.subplots(len(cols_to_plot), 2, 
figsize=(10,len(cols_to_plot)*2.5))\n\n\nfor idx, col in enumerate(cols_to_plot):\n \n vaccination_rate_plot('h1n1_vaccine', col, df, ax=ax[idx, 0])\n vaccination_rate_plot('seasonal_vaccine', col, df, ax=ax[idx, 1])\n \n ax[0, 0].legend(loc='lower center', bbox_to_anchor=(0.5, 1.05), title='h1n1_vaccine')\n ax[0, 1].legend(loc='lower center', bbox_to_anchor=(0.5, 1.05), title='seasonal_vaccine')\n \nfig.tight_layout()", "_____no_output_____" ] ], [ [ "### Categorical columns", "_____no_output_____" ] ], [ [ "features_df = features_raw_df.copy()\nlabels_df = labels_raw_df.copy()", "_____no_output_____" ], [ "features_df.dtypes == object", "_____no_output_____" ], [ "# All categorical columns considered apart from employment-related\ncategorical_cols = features_df.columns[features_df.dtypes == \"object\"].values[:-2]\ncategorical_cols", "_____no_output_____" ], [ "categorical_cols = np.delete(categorical_cols, np.where(categorical_cols == 'hhs_geo_region'))\ncategorical_cols", "_____no_output_____" ], [ "features_df.employment_occupation.unique()", "_____no_output_____" ], [ "features_df.hhs_geo_region.unique()", "_____no_output_____" ], [ "features_df[categorical_cols].head()", "_____no_output_____" ], [ "for col in categorical_cols:\n col_dummies = pd.get_dummies(features_df[col], drop_first = True)\n features_df = features_df.drop(col, axis=1)\n features_df = pd.concat([features_df, col_dummies], axis=1)\n", "_____no_output_____" ], [ "features_df.head()", "_____no_output_____" ], [ "features_df.isna().sum()", "_____no_output_____" ], [ "def preprocess_categorical(df):\n categorical_cols = df.columns[df.dtypes == \"object\"].values[:-2]\n categorical_cols = np.delete(categorical_cols, np.where(categorical_cols == 'hhs_geo_region'))\n \n for col in categorical_cols:\n col_dummies = pd.get_dummies(df[col], drop_first = True)\n df = df.drop(col, axis=1)\n df = pd.concat([df, col_dummies], axis=1)\n \n df = df.drop(['hhs_geo_region', 'employment_industry', 'employment_occupation'], axis=1)\n \n return df", "_____no_output_____" ] ], [ [ "## MACHINE LEARNING", "_____no_output_____" ], [ "### Machine Learning Model", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import StandardScaler\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.compose import ColumnTransformer\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.multioutput import MultiOutputClassifier\n\nfrom sklearn.pipeline import Pipeline\n\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.metrics import roc_curve, roc_auc_score\n\nRANDOM_SEED = 6 ", "_____no_output_____" ], [ "features_raw_df.dtypes != \"object\"", "_____no_output_____" ], [ "numeric_cols = features_raw_df.columns[features_raw_df.dtypes != \"object\"].values\nprint(numeric_cols)", "['h1n1_concern' 'h1n1_knowledge' 'behavioral_antiviral_meds'\n 'behavioral_avoidance' 'behavioral_face_mask' 'behavioral_wash_hands'\n 'behavioral_large_gatherings' 'behavioral_outside_home'\n 'behavioral_touch_face' 'doctor_recc_h1n1' 'doctor_recc_seasonal'\n 'chronic_med_condition' 'child_under_6_months' 'health_worker'\n 'health_insurance' 'opinion_h1n1_vacc_effective' 'opinion_h1n1_risk'\n 'opinion_h1n1_sick_from_vacc' 'opinion_seas_vacc_effective'\n 'opinion_seas_risk' 'opinion_seas_sick_from_vacc' 'household_adults'\n 'household_children']\n" ] ], [ [ "### Features Preprocessing", "_____no_output_____" ] ], [ [ "# chain preprocessing into a Pipeline object\nnumeric_preprocessing_steps = Pipeline([\n ('standard_scaler', 
StandardScaler()),\n ('simple_imputer', SimpleImputer(strategy='median'))\n])", "_____no_output_____" ], [ "# create the preprocessor stage of final pipeline\npreprocessor = ColumnTransformer(\n transformers = [\n (\"numeric\", numeric_preprocessing_steps, numeric_cols)\n ],\n remainder = \"passthrough\"\n)", "_____no_output_____" ], [ "estimators = MultiOutputClassifier(\n estimator=LogisticRegression(penalty=\"l2\", C=1)\n)", "_____no_output_____" ], [ "full_pipeline = Pipeline([\n (\"preprocessor\", preprocessor),\n (\"estimators\", estimators),\n])", "_____no_output_____" ], [ "features_df_trans = preprocess_categorical(features_df)", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_test_split(\n features_df_trans,\n labels_df,\n test_size=0.33,\n shuffle=True,\n stratify=labels_df,\n random_state=RANDOM_SEED\n)", "_____no_output_____" ], [ "X_train", "_____no_output_____" ], [ "# Train model\nfull_pipeline.fit(X_train, y_train)", "C:\\Users\\user\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\user\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\n" ], [ "# Predict on evaluation set\n# This competition wants probabilities, not labels\npreds = full_pipeline.predict_proba(X_test)\npreds", "_____no_output_____" ], [ "print(\"test_probas[0].shape\", preds[0].shape)\nprint(\"test_probas[1].shape\", preds[1].shape)", "test_probas[0].shape (8814, 2)\ntest_probas[1].shape (8814, 2)\n" ], [ "y_pred = pd.DataFrame(\n {\n \"h1n1_vaccine\": preds[0][:, 1],\n \"seasonal_vaccine\": preds[1][:, 1],\n },\n index = y_test.index\n)\nprint(\"y_pred.shape:\", y_pred.shape)\ny_pred.head()", "y_pred.shape: (8814, 2)\n" ], [ "fig, ax = plt.subplots(1, 2, figsize=(7, 3.5))\n\nfpr, tpr, thresholds = roc_curve(y_test['h1n1_vaccine'], y_pred['h1n1_vaccine'])\nax[0].plot(fpr, tpr)\nax[0].plot([0, 1], [0, 1], color='grey', linestyle='--')\nax[0].set_ylabel('TPR')\nax[0].set_xlabel('FPR')\nax[0].set_title(f\"{'h1n1_vaccine'}: AUC = {roc_auc_score(y_test['h1n1_vaccine'], y_pred['h1n1_vaccine']):.4f}\")\n \nfpr, tpr, thresholds = roc_curve(y_test['seasonal_vaccine'], y_pred['seasonal_vaccine'])\nax[1].plot(fpr, tpr)\nax[1].plot([0, 1], [0, 1], color='grey', linestyle='--') \nax[1].set_xlabel('FPR')\nax[1].set_title(f\"{'seasonal_vaccine'}: AUC = {roc_auc_score(y_test['seasonal_vaccine'], y_pred['seasonal_vaccine']):.4f}\")\n\nfig.tight_layout()", "_____no_output_____" ], [ "roc_auc_score(y_test, y_pred)", "_____no_output_____" ] ], [ [ "### Retrain on full Dataset", "_____no_output_____" ] ], [ [ "full_pipeline.fit(features_df_trans, labels_df);", "C:\\Users\\user\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\user\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. 
Specify a solver to silence this warning.\n FutureWarning)\n" ] ], [ [ "## PREDICTIONS FOR THE TEST SET", "_____no_output_____" ] ], [ [ "test_features_df = pd.read_csv('data/test_set_features.csv', index_col='respondent_id')", "_____no_output_____" ], [ "test_features_df", "_____no_output_____" ], [ "test_features_df_trans = preprocess_categorical(test_features_df)", "_____no_output_____" ], [ "test_preds = full_pipeline.predict_proba(test_features_df_trans)", "_____no_output_____" ], [ "submission_df = pd.read_csv('data/submission_format.csv', index_col='respondent_id')", "_____no_output_____" ], [ "# Save predictions to submission data frame\nsubmission_df[\"h1n1_vaccine\"] = test_preds[0][:, 1]\nsubmission_df[\"seasonal_vaccine\"] = test_preds[1][:, 1]\n\nsubmission_df.head()", "_____no_output_____" ], [ "submission_df.to_csv('data/my_submission.csv', index=True)", "_____no_output_____" ] ] ]
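A caveat worth flagging about the `preprocess_categorical` function used above: because `pd.get_dummies` derives the dummy columns from the category levels it actually sees, running it separately on the training and test frames can produce differently sized or ordered column sets whenever a level is missing from one of the two. The sketch below is not part of the original workflow and uses toy frames; it shows one way to force the test columns to match the training columns:

```python
import pandas as pd

# Toy frames: the test split is missing one training level and has one unseen level.
train = pd.DataFrame({"age_group": ["18 - 34 Years", "65+ Years", "35 - 44 Years"]})
test = pd.DataFrame({"age_group": ["18 - 34 Years", "55 - 64 Years"]})

train_dummies = pd.get_dummies(train, drop_first=True)
test_dummies = pd.get_dummies(test, drop_first=True)

# Reindex the test columns to the training columns: training levels absent
# from the test split become all-zero columns, and levels unseen in training
# are dropped entirely.
test_aligned = test_dummies.reindex(columns=train_dummies.columns, fill_value=0)

print(train_dummies.columns.tolist())
print(test_aligned.columns.tolist())
```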
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
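Building on the alignment caveat above, a related refinement for this record's pipeline is to fold the categorical handling into the `ColumnTransformer` itself with `OneHotEncoder(handle_unknown="ignore")`, which sidesteps manual dummy alignment entirely. This sketch is not from the notebook, and the column lists are illustrative examples rather than the notebook's exact feature sets:

```python
# Sketch: encode categoricals inside the pipeline instead of with get_dummies.
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.multioutput import MultiOutputClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler

numeric_steps = Pipeline([
    ("imputer", SimpleImputer(strategy="median")),
    ("scaler", StandardScaler()),
])
categorical_steps = Pipeline([
    ("imputer", SimpleImputer(strategy="most_frequent")),
    ("onehot", OneHotEncoder(handle_unknown="ignore")),  # unseen levels -> all zeros
])

preprocessor = ColumnTransformer([
    ("num", numeric_steps, ["h1n1_concern", "household_adults"]),  # example columns
    ("cat", categorical_steps, ["age_group", "race", "sex"]),      # example columns
])

model = Pipeline([
    ("preprocessor", preprocessor),
    ("estimators", MultiOutputClassifier(LogisticRegression(max_iter=1000))),
])
```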
4a7987848fa835308cc69276a7c72f73112d476e
52,027
ipynb
Jupyter Notebook
Chapter 8 Sentiment Analysis + Chapter 9 Embedding in Web App.ipynb
almoskowitz/Raschka_MLBook
9f019041de66ab8ed558292f58578a36e029825b
[ "MIT" ]
null
null
null
Chapter 8 Sentiment Analysis + Chapter 9 Embedding in Web App.ipynb
almoskowitz/Raschka_MLBook
9f019041de66ab8ed558292f58578a36e029825b
[ "MIT" ]
null
null
null
Chapter 8 Sentiment Analysis + Chapter 9 Embedding in Web App.ipynb
almoskowitz/Raschka_MLBook
9f019041de66ab8ed558292f58578a36e029825b
[ "MIT" ]
null
null
null
50.51165
1,619
0.61718
[ [ [ "In this chapter you will:\n\n* Clean and prepare text data\n* Build feature vectors from text documents\n* Train a machine learning model to classify positive and negative movie reviews\n* Work with large text datasets using out-of-core learning", "_____no_output_____" ] ], [ [ "## Will be working with movie reviews from IMDB database\n## Dataset is 50,000 reviews labeled as positive or negative\n## Positive was rated with more than 6 stars on IMDb\n\n## Read movie reviews into a Dataframe- may take 10 minutes\n\nimport pyprind\nimport pandas as pd\nimport os\npbar = pyprind.ProgBar(50000)\nlabels = {'pos':1, 'neg':0}\ndf = pd.DataFrame()\nfor s in ('test', 'train'):\n for l in ('pos', 'neg'):\n path = './aclImdb/%s/%s' % (s,l)\n for file in os.listdir(path):\n with open(os.path.join(path, file), 'r') as infile: \n txt = infile.read()\n df = df.append([[txt, labels[l]]], ignore_index = True)\n pbar.update()\ndf.columns = ['review', 'sentiment']", "0% [##############################] 100% | ETA: 00:00:00\nTotal time elapsed: 00:02:27\n" ], [ "import numpy as np \nnp.random.seed(0)\ndf = df.reindex(np.random.permutation(df.index))\ndf.to_csv('./movie_data.csv', index = False)", "_____no_output_____" ], [ "df = pd.read_csv('./movie_data.csv')", "_____no_output_____" ], [ "df.head(3)", "_____no_output_____" ] ], [ [ "### Bag-of-words model\n\n1. Create a vocabulary of unique tokens i.e., words from the entire set of documents\n2. Construct a feature vector from each document that contains the counts of how often each word occurs in the particular document\n\nThis will result in sparse vectors ", "_____no_output_____" ] ], [ [ "### Transforming words into feautre vectors\n\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer\ncount = CountVectorizer()\ndocs = np.array([\n 'The sun is shining',\n 'The weather is sweet',\n 'The sun is shining and the weather is sweet'])\nbag = count.fit_transform(docs)", "_____no_output_____" ], [ "print (count.vocabulary_)", "{u'and': 0, u'weather': 6, u'sweet': 4, u'sun': 3, u'is': 1, u'the': 5, u'shining': 2}\n" ], [ "print(bag.toarray())\n## referred to as \"raw term frequencies\": tf(t,d) the number of times\n## term t appeared in document d", "[[0 1 1 1 0 1 0]\n [0 1 0 0 1 1 1]\n [1 2 1 1 1 2 1]]\n" ], [ "### Assessing word relevancy via term frequency-inverse document frequeny\n## TF-idf - downweights frequently occurring words\n## Defined as the product of term freqency and inverse document frequency\n## Inverse doc frequency is\n\n## log [(n_d)/(1+df(d,t))]\n## where n_d is the total number of docuements \n## and df(d,t) is the number of docs d that contain term t\n\n## Sci-kit learn has a transofrmer for this\n\nfrom sklearn.feature_extraction.text import TfidfTransformer\ntfidf = TfidfTransformer()\nnp.set_printoptions(precision=2)\nprint(tfidf.fit_transform(count.fit_transform(docs)).toarray())", "[[ 0. 0.43 0.56 0.56 0. 0.43 0. ]\n [ 0. 0.43 0. 0. 
0.56 0.43 0.56]\n [ 0.4 0.48 0.31 0.31 0.31 0.48 0.31]]\n" ] ], [ [ "### Cleaning Text Data", "_____no_output_____" ] ], [ [ "## strip out all unwanted characters\n\ndf.loc[0, 'review'][-50:]", "_____no_output_____" ], [ "### Had to edit from book - mistake in the book near .join\nimport re\ndef preprocessor(text):\n text = re.sub('<[^>]*>', '', text)\n emoticons = re.findall('(?::|;|=)(?:-)?(?:\\)|\\(|D|P)', text)\n text = re.sub('[\\W]+', ' ', text.lower()) + ' '.join(emoticons).replace('-', '')\n return text", "_____no_output_____" ], [ "preprocessor(df.loc[0, 'review'][-50:])", "_____no_output_____" ], [ "preprocessor(\"</a>This :) is :( a test :-) !\")", "_____no_output_____" ], [ "df['review'] = df['review'].apply(preprocessor)", "_____no_output_____" ] ], [ [ "### Processing documents into tokens", "_____no_output_____" ] ], [ [ "## Need to figure out how to split the text into individual elements\n## Can tokenize words by splitting at the whitespace characters\n\ndef tokenizer(text):\n return text.split()\n\ntokenizer('runners like running and thus they run')", "_____no_output_____" ], [ "## Word stemming is taking the word root and mapping words that are similar\n## nltk uses the porter stemming algorithm\n\nfrom nltk.stem.porter import PorterStemmer\nporter = PorterStemmer()\ndef tokenizer_porter(text):\n return [porter.stem(word) for word in text.split()]\n\ntokenizer_porter('runners like running and thus they run')", "_____no_output_____" ], [ "## lemmatization aims to obtain the canonical forms of individual words \n## stemming and lemmatization have little effect on performance\n\n### Stop word removal \n## Because they are so common, stop words only have a minimal effect\n## on the classification \n\nimport nltk\nnltk.download('stopwords')\n\n", "[nltk_data] Downloading package stopwords to\n[nltk_data] /Users/andrew.moskowitz/nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n" ], [ "from nltk.corpus import stopwords\nstop = stopwords.words('english')\n[w for w in tokenizer_porter('a runner likes running and runs a lot')[-10:] if w not in stop]", "_____no_output_____" ] ], [ [ "### Training a logistic regression model for document classification", "_____no_output_____" ] ], [ [ "## Divide dataframe into 25,000 training and 25,000 test\n\nX_train = df.loc[:25000, 'review'].values\ny_train = df.loc[:25000, 'sentiment'].values\nX_test = df.loc[25000:, 'review'].values\ny_test = df.loc[25000:, 'sentiment'].values", "_____no_output_____" ], [ "from sklearn.grid_search import GridSearchCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\ntfidf = TfidfVectorizer(strip_accents = None,\n lowercase = False,\n preprocessor = None)\nparam_grid = [{'vect__ngram_range': [(1,1)],\n 'vect__stop_words': [stop, None],\n 'vect__tokenizer': [tokenizer, tokenizer_porter],\n 'clf__penalty': ['l1', 'l2'],\n 'clf__C': [1.0, 10.0, 100.0]},\n {'vect__ngram_range': [(1,1)],\n 'vect__stop_words': [stop, None],\n 'vect__tokenizer': [tokenizer, tokenizer_porter],\n 'vect__use_idf': [False],\n 'vect__norm':[None],\n 'clf__penalty': ['l1', 'l2'],\n 'clf__C': [1.0, 10.0, 100.0]}]\n\nlr_tfidf = Pipeline([('vect', tfidf), ('clf', LogisticRegression(random_state = 0))])\n\ngs_lr_tfidf = GridSearchCV(lr_tfidf, param_grid, scoring='accuracy', cv = 5, verbose = 1, n_jobs = -1)\ngs_lr_tfidf.fit(X_train, y_train)", "Fitting 5 folds for each of 48 candidates, totalling 240 fits\n" ], [ "print('Best 
Parameter set: %s ' % gs_lr_tfidf.best_params_)\nprint('CV Accuracy: %.3f' % gs_lr_tfidf.best_score_)\nclf = gs_lr_tfidf.best_estimator_\nprint('Test Accuracy: %.3f' % clf.score(X_test, y_test))", "_____no_output_____" ] ], [ [ "Naive Bayes classifiers are also popular for this kind of work. Can read about them in:\n\nS. Raschka. Naive Bayes and Text Classification I - Introduction and Theory. Computing Research Repository (CoRR), abs/1410.5329, 2014. http://arxiv.org/pdf/1410.5329v3.pdf", "_____no_output_____" ], [ "### Out-of-core learning\n\nCan stream little bits of data at a time to train the model and update the estimates", "_____no_output_____" ] ], [ [ "import numpy as np\nimport re\nfrom nltk.corpus import stopwords\nstop = stopwords.words('english')\ndef tokenizer(text):\n text = re.sub('<[^>]*>', '', text)  # strip HTML tags, as in the preprocessor above\n emoticons = re.findall('(?::|;|=)(?:-)?(?:\\)|\\(|D|P)', text.lower())\n text = re.sub('[\\W]+', ' ', text.lower()) + ' '.join(emoticons).replace('-', '')\n tokenized = [w for w in text.split() if w not in stop]\n return tokenized", "_____no_output_____" ], [ "def stream_docs(path):\n with open(path, 'r') as csv:\n next(csv)\n for line in csv:\n text, label = line[:-3], int(line[-2])\n yield text, label", "_____no_output_____" ], [ "next(stream_docs(path='./movie_data.csv'))", "_____no_output_____" ], [ "def get_minibatch(doc_stream, size):\n docs, y = [], []\n try:\n for _ in range(size):\n text, label = next(doc_stream)\n docs.append(text)\n y.append(label)\n except StopIteration:\n return None, None\n return docs, y", "_____no_output_____" ], [ "### Use a hashing trick to be able to calculate counts without keeping a vocabulary in memory\n\nfrom sklearn.feature_extraction.text import HashingVectorizer\nfrom sklearn.linear_model import SGDClassifier\nvect = HashingVectorizer(decode_error='ignore',\n n_features = 2**21,\n preprocessor=None,\n tokenizer = tokenizer)\n\nclf = SGDClassifier(loss = 'log', random_state = 1, n_iter = 1)\ndoc_stream = stream_docs(path='./movie_data.csv')", "_____no_output_____" ], [ "## Initialize progress bar with 45 minibatches of 1000 docs each\n## use the last 5000 for performance \n\nimport pyprind\npbar = pyprind.ProgBar(45)\nclasses = np.array([0,1])\nfor _ in range(45):\n X_train, y_train = get_minibatch(doc_stream, size = 1000)\n if not X_train:\n break\n X_train = vect.transform(X_train)\n clf.partial_fit(X_train, y_train, classes = classes)\n pbar.update()", "/Users/andrew.moskowitz/anaconda2/lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.py:117: DeprecationWarning: n_iter parameter is deprecated in 0.19 and will be removed in 0.21. 
Use max_iter and tol instead.\n  DeprecationWarning)\n" ], [ "X_test, y_test = get_minibatch(doc_stream, size = 5000)\nX_test = vect.transform(X_test)\nprint('Accuracy: %.3f' % clf.score(X_test, y_test))", "Accuracy: 0.866\n" ], [ "## Add last 5000 docs to update the model\nclf = clf.partial_fit(X_test, y_test)", "/Users/andrew.moskowitz/anaconda2/lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.py:117: DeprecationWarning: n_iter parameter is deprecated in 0.19 and will be removed in 0.21. Use max_iter and tol instead.\n  DeprecationWarning)\n" ] ], [ [ "A popular extension of this model that accounts for structure and grammar is LDA, or Latent Dirichlet Allocation.\n\nWord2vec is a more modern alternative to the bag-of-words model; \n it uses neural networks to automatically learn relationships between words", "_____no_output_____" ], [ "# Chapter 9 Embedding a Machine Learning Model into a Web Application", "_____no_output_____" ], [ "The session was kept open at the suggestion of the authors, as we use the same model that was generated in the previous chapter.\n\nOne way to achieve \"model persistence\" (being able to reuse a trained model) is serializing and deserializing our Python objects. This allows us to save and reload the current state of our model. ", "_____no_output_____" ] ], [ [ "import pickle\nimport os\ndest = os.path.join('movieclassifier', 'pkl_objects')\nif not os.path.exists(dest):\n    os.makedirs(dest)\npickle.dump(stop,\n    open(os.path.join(dest, 'stopwords.pkl'), 'wb'), protocol = 2)\npickle.dump(clf, open(os.path.join(dest, 'classifier.pkl'), 'wb'), protocol = 2)", "_____no_output_____" ] ], [ [ "The next bit, testing the serializer and vectorizer, was done in an IPython session.", "_____no_output_____" ], [ "### Setting up a SQLite database for data storage", "_____no_output_____" ] ], [ [ "### Create a new SQLite database inside movieclassifier to\n### collect optional feedback about predictions from users\n\nimport sqlite3\nimport os\nconn = sqlite3.connect('reviews.sqlite')\nc = conn.cursor()\nc.execute('CREATE TABLE review_db (review TEXT, sentiment INTEGER, date TEXT)')\nexample1 = 'I love this movie'\nc.execute(\"INSERT INTO review_db (review, sentiment, date) VALUES (?, ?, DATETIME('now'))\", (example1, 1))\nexample2 = 'I disliked this movie'\nc.execute(\"INSERT INTO review_db (review, sentiment, date) VALUES (?, ?, DATETIME('now'))\", (example2, 0))\nconn.commit()\nconn.close()", "_____no_output_____" ], [ "conn = sqlite3.connect('reviews.sqlite')\nc = conn.cursor()\nc.execute(\"SELECT * FROM review_db where date BETWEEN '2018-01-01 00:00:00' AND DATETIME('now')\")\nresults = c.fetchall()\nconn.close()\nprint(results)", "[(u'I love this movie', 1, u'2018-03-19 03:49:07'), (u'I disliked this movie', 0, u'2018-03-19 03:49:07')]\n" ], [ "import flask", "_____no_output_____" ], [ "import wtforms", "_____no_output_____" ] ] ]
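To close the loop on this chapter's goal, here is a minimal sketch, not taken from the book, of how the pickled objects could be served behind a Flask route. The route name is an arbitrary choice, and the tokenizer below is a simplified stand-in; in practice the vectorizer must use exactly the tokenizer and `n_features` from training for predictions to be valid:

```python
# Minimal Flask sketch for serving the pickled sentiment classifier.
# Assumptions: route name and simplified tokenizer; vectorizer settings must
# match training (n_features=2**21, same tokenizer).
import os
import pickle

from flask import Flask, jsonify, request
from sklearn.feature_extraction.text import HashingVectorizer

dest = os.path.join('movieclassifier', 'pkl_objects')
stop = pickle.load(open(os.path.join(dest, 'stopwords.pkl'), 'rb'))
clf = pickle.load(open(os.path.join(dest, 'classifier.pkl'), 'rb'))

def tokenizer(text):
    # simplified stand-in for the training tokenizer
    return [w for w in text.lower().split() if w not in stop]

vect = HashingVectorizer(decode_error='ignore', n_features=2**21,
                         preprocessor=None, tokenizer=tokenizer)

app = Flask(__name__)

@app.route('/classify', methods=['POST'])
def classify():
    # vectorize the submitted review and return label + confidence as JSON
    X = vect.transform([request.form['review']])
    label = int(clf.predict(X)[0])
    proba = float(clf.predict_proba(X).max())
    return jsonify(sentiment=label, probability=proba)

if __name__ == '__main__':
    app.run()
```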
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ] ]
4a798c172e6bad582a924b9e1ce50de861c976f7
184,047
ipynb
Jupyter Notebook
examples/advanced_circuits_algorithms/QPE/QPE.ipynb
jzhzhu/amazon-braket-examples
a5cce895175358698526fef7fd6d7026c9c2961f
[ "Apache-2.0" ]
1
2021-07-10T14:48:50.000Z
2021-07-10T14:48:50.000Z
examples/advanced_circuits_algorithms/QPE/QPE.ipynb
jzhzhu/amazon-braket-examples
a5cce895175358698526fef7fd6d7026c9c2961f
[ "Apache-2.0" ]
null
null
null
examples/advanced_circuits_algorithms/QPE/QPE.ipynb
jzhzhu/amazon-braket-examples
a5cce895175358698526fef7fd6d7026c9c2961f
[ "Apache-2.0" ]
null
null
null
121.724206
75,908
0.83458
[ [ [ "# QUANTUM PHASE ESTIMATION", "_____no_output_____" ], [ "This tutorial provides a detailed implementation of the Quantum Phase Estimation (QPE) algorithm using the Amazon Braket SDK.\nThe QPE algorithm is designed to estimate the eigenvalues of a unitary operator $U$ [1, 2]; \nit is a very important subroutine to many quantum algorithms, most famously Shor's algorithm for factoring and the HHL algorithm (named after the physicists Harrow, Hassidim and Lloyd) for solving linear systems of equations on a quantum computer [1, 2]. \nMoreover, eigenvalue problems can be found across many disciplines and application areas, including (for example) principal component analysis (PCA) as used in machine learning or the solution of differential equations as relevant across mathematics, physics, engineering and chemistry. \nWe first review the basics of the QPE algorithm.\nWe then implement the QPE algorithm in code using the Amazon Braket SDK, and we illustrate the application thereof with simple examples. \nThis notebook also showcases the Amazon Braket `circuit.subroutine` functionality, which allows us to use custom-built gates as if they were any other built-in gates. \nThis tutorial is set up to run either on the local simulator or the managed simulators; changing between these devices merely requires changing one line of code as demonstrated as follows in cell [4]. ", "_____no_output_____" ], [ "## TECHNICAL BACKGROUND OF QPE ", "_____no_output_____" ], [ "__Introduction__: A unitary matrix is a complex, square matrix whose adjoint (or conjugate transpose) is equal to its inverse. Unitary matrices have many nice properties, including the fact that their eigenvalues are always roots of unity (that is, phases). Given a unitary matrix $U$ (satisfying $U^{\\dagger}U=\\mathbb{1}=UU^{\\dagger}$) and an eigenstate $|\\psi \\rangle$ with $U|\\psi \\rangle = e^{2\\pi i\\varphi}|\\psi \\rangle$, the Quantum Phase Estimation (QPE) algorithm provides an estimate $\\tilde{\\varphi} \\approx \\varphi$ for the phase $\\varphi$ (with $\\varphi \\in [0,1]$ since the eigenvalues $\\lambda = \\exp(2\\pi i\\varphi)$ of a unitary have modulus one). \nThe QPE works with high probability within an additive error $\\varepsilon$ using $O(\\log(1/\\varepsilon))$ qubits (without counting the qubits used to encode the eigenstate) and $O(1/\\varepsilon)$ controlled-$U$ operations [1].\n\n__Quantum Phase Estimation Algorithm__: \nThe QPE algorithm takes a unitary $U$ as input. For the sake of simplicity (we will generalize the discussion below), suppose that the algorithm also takes as input an eigenstate $|\\psi \\rangle$ fulfilling \n\n$$U|\\psi \\rangle = \\lambda |\\psi \\rangle,$$\n\nwith $\\lambda = \\exp(2\\pi i\\varphi)$. \n\nQPE uses two registers of qubits: we refer to the first register as *precision* qubits (as the number of qubits $n$ in the first register sets the achievable precision of our results) and the second register as *query* qubits (as the second register hosts the eigenstate $|\\psi \\rangle$). \nSuppose we have prepared this second register in $|\\psi \\rangle$. We then prepare a uniform superposition of all basis vectors in the first register using a series of Hadamard gates. \n\nNext, we apply a series of controlled-unitaries $C-U^{2^{k}}$ for different powers of $k=0,1,\\dots, n-1$ (as illustrated in the circuit diagram that follows). 
\nFor example, for $k=1$ we get\n\\begin{equation} \n\\begin{split}\n(|0 \\rangle + |1 \\rangle) |\\psi \\rangle & \\rightarrow |0 \\rangle |\\psi \\rangle + |1 \\rangle U|\\psi \\rangle \\\\\n& = (|0 \\rangle + e^{2\\pi i \\varphi}|1 \\rangle) |\\psi \\rangle.\n\\end{split}\n\\end{equation}\n\nNote that the second register remains unaffected as it stays in the eigenstate $|\\psi \\rangle$. \nHowever, we managed to transfer information about the phase of the eigenvalue of $U$ (that is, $\\varphi$) into the first *precision* register by encoding it as a relative phase in the state of the qubits in the first register. \n\nSimilarly, for $k=2$ we obtain\n\\begin{equation} \n\\begin{split}\n(|0 \\rangle + |1 \\rangle) |\\psi \\rangle & \\rightarrow |0 \\rangle |\\psi \\rangle + |1 \\rangle U^{2}|\\psi \\rangle \\\\\n& = (|0 \\rangle + e^{2\\pi i 2\\varphi}|1 \\rangle) |\\psi \\rangle,\n\\end{split}\n\\end{equation}\n\nwhere this time we wrote $2\\varphi$ into the precision register. The process is similar for all $k>2$.\n\nIntroducing the following notation for binary fractions\n$$[0. \\varphi_{l}\\varphi_{l+1}\\dots \\varphi_{m}] = \\frac{\\varphi_{l}}{2^{1}} + \\frac{\\varphi_{l+1}}{2^{2}} + \\dots + \\frac{\\varphi_{m}}{2^{m-l+1}},$$ \n\none can show that the application of a controlled unitary $C-U^{2^{k}}$ leads to the following transformation\n\n\\begin{equation} \n\\begin{split}\n(|0 \\rangle + |1 \\rangle) |\\psi \\rangle & \\rightarrow |0 \\rangle |\\psi \\rangle + |1 \\rangle U^{2^{k}}|\\psi \\rangle \\\\\n& = (|0 \\rangle + e^{2\\pi i 2^{k}\\varphi}|1 \\rangle) |\\psi \\rangle \\\\\n& = (|0 \\rangle + e^{2\\pi i [0.\\varphi_{k+1}\\dots \\varphi_{n}]}|1 \\rangle) |\\psi \\rangle,\n\\end{split}\n\\end{equation}\n\nwhere the first $k$ bits of precision in the binary expansion (that is, those bits to the left of the decimal) can be dropped, because $e^{2\\pi i \\theta} = 1$ for any whole number $\\theta$.\n\nThe QPE algorithm implements a series of these transformations for $k=0, 1, \\dots, n-1$, using $n$ qubits in the precision register. \nIn its entirety, this sequence of controlled unitaries leads to the transformation\n\n$$ |0, \\dots, 0 \\rangle \\otimes |\\psi \\rangle \\longrightarrow \n(|0 \\rangle + e^{2\\pi i [0.\\varphi_{n}]}|1 \\rangle) \n\\otimes (|0 \\rangle + e^{2\\pi i [0.\\varphi_{n-1}\\varphi_{n}]}|1 \\rangle)\n\\otimes \\dots\n\\otimes (|0 \\rangle + e^{2\\pi i [0.\\varphi_{1}\\dots\\varphi_{n}]}|1 \\rangle) \n\\otimes |\\psi \\rangle.\n$$\n\nBy inspection, one can see that the state of the register qubits above corresponds to a quantum Fourier transform of the state $|\\varphi_1,\\dots,\\varphi_n\\rangle$. Thus, the final step of the QPE algorithm is to run the *inverse* Quantum Fourier Transform (QFT) algorithm on the precision register to extract the phase information from this state. 
The resulting state is\n$$|\\varphi_{1}, \\varphi_{2}, \\dots, \\varphi_{n} \\rangle \\otimes |\\psi\\rangle.$$\n\nMeasuring the precision qubits in the computational basis then gives the classical bitstring $\\varphi_{1}, \\varphi_{2}, \\dots, \\varphi_{n}$, from which we can readily infer the phase estimate $\\tilde{\\varphi} = 0.\\varphi_{1} \\dots \\varphi_{n}$ with the corresponding eigenvalue $\\tilde{\\lambda} = \\exp(2\\pi i \\tilde{\\varphi})$.\n \n__Simple example for illustration__: For concreteness, consider a simple example with the unitary given by the Pauli $X$ gate, $U=X$, for which $|\\Psi \\rangle = |+\\rangle = (|0 \\rangle + |1 \\rangle)/\\sqrt{2}$ is an eigenstate with eigenvalue $\\lambda = 1$, i.e., $\\varphi=0$. \nThis state can be prepared with a Hadamard gate as $|\\Psi \\rangle = H|0 \\rangle$. \nWe take a precision register consisting of just two qubits ($n=2$). \n\nThus, after the first layer of Hadamard gates, the quantum state is\n$$|0,0,0 \\rangle \\rightarrow |+,+,+\\rangle.$$\n\nNext, the applications of the controlled-$U$ gates (equal to $C-X$ operations, or CNOT gates in this example) leave this state untouched, because $|+\\rangle$ is an eigenstate of $X$ with eigenvalue $+1$. \nFinally, applying the inverse QFT leads to \n\n$$\\mathrm{QFT}^{\\dagger}|+++\\rangle=\\mathrm{QFT}^\\dagger\\frac{|00\\rangle + |01\\rangle + |10\\rangle + |11\\rangle}{2}\\otimes |+\\rangle = |00\\rangle \\otimes |+\\rangle,$$\n\nfrom which we deduce $\\varphi = [0.00]=0$ and therefore $\\lambda=1$, as expected. \nHere, in the last step we have used $|00\\rangle + |01\\rangle + |10\\rangle + |11\\rangle = (|0\\rangle + e^{2\\pi i[0.0]}|1\\rangle)(|0\\rangle + e^{2\\pi i[0.00]}|1\\rangle)$, which makes the effect of the inverse QFT more apparent. \n\n__Initial state of query register__: So far, we have assumed that the query register is prepared in an eigenstate $|\\Psi\\rangle$ of $U$. What happens if this is not the case? Let's reconsider the simple example given previously.\n\nSuppose now that the query register is instead prepared in the state $|\\Psi\\rangle = |1\\rangle$. \nWe can always express this state in the eigenbasis of $U$, that is, $|1\\rangle = \\frac{1}{\\sqrt{2}}(|+\\rangle - |-\\rangle)$. \nBy linearity, application of the QPE algorithm then gives (up to normalization)\n\n\\begin{equation} \n\\begin{split}\n\\mathrm{QPE}(|0,0,\\dots\\rangle \\otimes |1\\rangle) & = \\mathrm{QPE}(|0,0,\\dots\\rangle \\otimes |+\\rangle)\n- \\mathrm{QPE}(|0,0,\\dots\\rangle \\otimes |-\\rangle) \\\\\n& = |\\varphi_{+}\\rangle \\otimes |+\\rangle - |\\varphi_{-}\\rangle \\otimes |-\\rangle. \\\\\n\\end{split}\n\\end{equation}\n\nWhen we measure the precision qubits in this state, 50% of the time we will observe the eigenphase $\\varphi_{+}$ and 50% of the time we will measure $\\varphi_{-}$. We illustrate this example numerically as follows.\n\nThis example motivates the general case: we can pass a state that is not an eigenstate of $U$ to the QPE algorithm, but we may need to repeat our measurements several times in order to obtain an estimate of the desired phase.", "_____no_output_____" ], [ "## CIRCUIT IMPLEMENTATION OF QPE", "_____no_output_____" ], [ "The QPE circuit can be implemented using Hadamard gates, controlled-$U$ unitaries, and the inverse QFT (denoted as $\\mathrm{QFT}^{-1}$). 
\nThe details of the calculation can be found in a number of resources (such as [1]); we omit them here.\nFollowing the previous discussion, the circuit that implements the QPE algorithm reads as below, where $m$ is the size of the lower query register and $n$ is the size of the upper precision register.", "_____no_output_____" ], [ "![image.png](attachment:image.png)", "_____no_output_____" ], [ "## IMPORTS and SETUP", "_____no_output_____" ] ], [ [ "# general imports\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\n# magic word for producing visualizations in notebook\n%matplotlib inline", "_____no_output_____" ], [ "# AWS imports: Import Amazon Braket SDK modules\nfrom braket.circuits import Circuit, circuit\nfrom braket.devices import LocalSimulator\nfrom braket.aws import AwsDevice", "_____no_output_____" ], [ "# local imports\nfrom utils_qpe import qpe, run_qpe\n\n%load_ext autoreload\n%autoreload 2", "_____no_output_____" ] ], [ [ "__NOTE__: Enter your desired device and S3 location (bucket and key) in the following area. If you are working with the local simulator ```LocalSimulator()``` you do not need to specify any S3 location. However, if you are using the managed (cloud-based) device or any QPU devices, you must specify the S3 location where your results will be stored. In this case, you must replace the API call ```device.run(circuit, ...)``` in the example that follows with ```device.run(circuit, s3_folder, ...)```. ", "_____no_output_____" ] ], [ [ "# set up device: local simulator or the managed cloud-based simulator\n# device = LocalSimulator()\ndevice = AwsDevice(\"arn:aws:braket:::device/quantum-simulator/amazon/sv1\")\n\n# Enter the S3 bucket you created during onboarding into the code that follows\nmy_bucket = \"amazon-braket-Your-Bucket-Name\" # the name of the bucket\nmy_prefix = \"Your-Folder-Name\" # the name of the folder in the bucket\ns3_folder = (my_bucket, my_prefix)", "_____no_output_____" ] ], [ [ "### Pauli Matrices:\nIn some of our examples, we choose the unitary $U$ to be given by the **Pauli Matrices**, which we thus define as follows:", "_____no_output_____" ] ], [ [ "# Define Pauli matrices\nId = np.eye(2) # Identity matrix\nX = np.array([[0., 1.],\n [1., 0.]]) # Pauli X\nY = np.array([[0., -1.j],\n [1.j, 0.]]) # Pauli Y\nZ = np.array([[1., 0.],\n [0., -1.]]) # Pauli Z", "_____no_output_____" ] ], [ [ "## IMPLEMENTATION OF THE QPE CIRCUIT", "_____no_output_____" ], [ "In ```utils_qpe.py``` we provide simple helper functions to implement the quantum circuit for the QPE algorithm. \nSpecifically, we demonstrate that such modular building blocks can be registered as subroutines, using ```@circuit.subroutine(register=True)```. \nMoreover, we provide a helper function (called ```get_qpe_phases```) to perform postprocessing based on the measurement results to extract the phase. The details of ```utils_qpe.py``` are shown in the Appendix.\n\nTo implement the unitary $C-U^{2^k}$, one can use the fact that $C-U^{2} = (C-U)(C-U)$, so that $C-U^{2^{k}}$ can be constructed by repeatedly applying the core building block $C-U$. \nHowever, the circuit generated using this approach will have a significantly larger depth. In our implementation, we instead define the matrix $U^{2^k}$ and create the controlled $C-(U^{2^k})$ gate from that.", "_____no_output_____" ], [ "## VISUALIZATION OF THE QPE CIRCUIT", "_____no_output_____" ], [ "To check our implementation of the QPE circuit, we visualize this circuit for a small number of qubits. 
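\n\nBefore doing so, here is a rough sketch in plain numpy of the controlled-matrix construction described above (an illustration only; the notebook itself relies on the registered subroutines from ```utils_qpe.py```, listed in the Appendix):\n\n```python\nimport numpy as np\n\ndef controlled_power_matrix(unitary, k):\n    # matrix for C-(U^(2^k)): |0><0| (x) Id + |1><1| (x) U^(2^k)\n    p0 = np.array([[1., 0.], [0., 0.]])  # projector onto |0> of the control qubit\n    p1 = np.array([[0., 0.], [0., 1.]])  # projector onto |1> of the control qubit\n    u_exp = np.linalg.matrix_power(unitary, 2**k)\n    return np.kron(p0, np.eye(len(unitary))) + np.kron(p1, u_exp)\n```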
", "_____no_output_____" ] ], [ [ "# set total number of qubits\nprecision_qubits = [0, 1]\nquery_qubits = [2]\n\n# prepare query register\nmy_qpe_circ = Circuit().h(query_qubits)\n\n# set unitary\nunitary = X\n\n# show small QPE example circuit\nmy_qpe_circ = my_qpe_circ.qpe(precision_qubits, query_qubits, unitary)\nprint('QPE CIRCUIT:')\nprint(my_qpe_circ)", "QPE CIRCUIT:\nT : |0|1|2| 3 |4| 5 |6|\n \nq0 : -H---U-SWAP---PHASE(-1.57)-H-\n | | | \nq1 : -H-U-|-SWAP-H-C--------------\n | | \nq2 : -H-U-U-----------------------\n\nT : |0|1|2| 3 |4| 5 |6|\n" ] ], [ [ "As shown in the folllowing code, the two registers can be distributed anywhere across the circuit, with arbitrary indices for the precision and the query registers. ", "_____no_output_____" ] ], [ [ "# set qubits\nprecision_qubits = [1, 3]\nquery_qubits = [5]\n\n# prepare query register\nmy_qpe_circ = Circuit().i(range(7))\nmy_qpe_circ.h(query_qubits)\n\n# set unitary\nunitary = X\n\n# show small QPE example circuit\nmy_qpe_circ = my_qpe_circ.qpe(precision_qubits, query_qubits, unitary)\nprint('QPE CIRCUIT:')\nprint(my_qpe_circ)", "QPE CIRCUIT:\nT : |0|1|2|3| 4 |5| 6 |7|\n \nq0 : -I-----------------------------\n \nq1 : -I-H---U-SWAP---PHASE(-1.57)-H-\n | | | \nq2 : -I-----|-|------|--------------\n | | | \nq3 : -I-H-U-|-SWAP-H-C--------------\n | | \nq4 : -I---|-|-----------------------\n | | \nq5 : -I-H-U-U-----------------------\n \nq6 : -I-----------------------------\n\nT : |0|1|2|3| 4 |5| 6 |7|\n" ] ], [ [ "As follows, we set up the same circuit, this time implementing the unitary $C-U^{2^k}$, by repeatedly applying the core building block $C-U$. \nThis operation can be done by setting the parameter ```control_unitary=False``` (default is ```True```). ", "_____no_output_____" ] ], [ [ "# set qubits\nprecision_qubits = [1, 3]\nquery_qubits = [5]\n\n# prepare query register\nmy_qpe_circ = Circuit().i(range(7))\nmy_qpe_circ.h(query_qubits)\n\n# set unitary\nunitary = X\n\n# show small QPE example circuit\nmy_qpe_circ = my_qpe_circ.qpe(precision_qubits, query_qubits, unitary, control_unitary=False)\nprint('QPE CIRCUIT:')\nprint(my_qpe_circ)", "QPE CIRCUIT:\nT : |0|1|2|3|4| 5 |6| 7 |8|\n \nq0 : -I-------------------------------\n \nq1 : -I-H---U-U-SWAP---PHASE(-1.57)-H-\n | | | | \nq2 : -I-----|-|-|------|--------------\n | | | | \nq3 : -I-H-U-|-|-SWAP-H-C--------------\n | | | \nq4 : -I---|-|-|-----------------------\n | | | \nq5 : -I-H-U-U-U-----------------------\n \nq6 : -I-------------------------------\n\nT : |0|1|2|3|4| 5 |6| 7 |8|\n" ] ], [ [ "In the circuit diagram, we can visually infer the exponents for $k=0,1$, at the expense of a larger circuit depth. ", "_____no_output_____" ], [ "## NUMERICAL TEST EXPERIMENTS", "_____no_output_____" ], [ "In the following section, we verify that our QFT implementation works as expected with a few test examples:\n1. We run QPE with $U=X$ and prepare the eigenstate $|\\Psi\\rangle = |+\\rangle = H|0\\rangle$ with phase $\\varphi=0$ and eigenvalue $\\lambda=1$. \n2. We run QPE with $U=X$ and prepare the eigenstate $|\\Psi\\rangle = |-\\rangle = HX|0\\rangle$ with phase $\\varphi=0.5$ and eigenvalue $\\lambda=-1$. \n3. We run QPE with $U=X$ and prepare $|\\Psi\\rangle = |1\\rangle = X|0\\rangle$ which is *not* an eigenstate of $U$. \nBecause $|1\\rangle = (|+\\rangle - |-\\rangle)/\\sqrt{2}$, we expect to measure both $\\varphi=0$ and $\\varphi=0.5$ associated with the two eigenstates $|\\pm\\rangle$. \n4. 
We run QPE with unitary $U=X \otimes Z$, and prepare the query register in the eigenstate $|\Psi\rangle = |+\rangle \otimes |1\rangle = H|0\rangle \otimes X|0\rangle$. \nHere, we expect to measure the phase $\varphi=0.5$ (giving the corresponding eigenvalue $\lambda=-1$). \n5. We run QPE with a _random_ two-qubit unitary, diagonal in the computational basis, and prepare the query register in the eigenstate $|11\rangle$.\nIn this case, we should be able to read off the eigenvalue and phase from $U$ and verify that QPE gives the right answer (with high probability) up to a small error (that depends on the number of qubits in the precision register).", "_____no_output_____" ], [ "## HELPER FUNCTIONS FOR NUMERICAL TESTS\nBecause we will run the same code repeatedly, let's first create a helper function we can use to keep the notebook clean.", "_____no_output_____" ] ], [ [ "def postprocess_qpe_results(out):\n \"\"\"\n Function to postprocess dictionary returned by run_qpe\n\n Args:\n out: dictionary containing results/information associated with QPE run as produced by run_qpe\n \"\"\"\n \n # unpack results\n circ = out['circuit']\n measurement_counts = out['measurement_counts']\n bitstring_keys = out['bitstring_keys']\n probs_values = out['probs_values']\n precision_results_dic = out['precision_results_dic']\n phases_decimal = out['phases_decimal']\n eigenvalues = out['eigenvalues']\n \n # print the circuit \n print('Printing circuit:')\n print(circ)\n \n # print measurement results\n print('Measurement counts:', measurement_counts)\n \n # plot probabilities\n plt.bar(bitstring_keys, probs_values);\n plt.xlabel('bitstrings');\n plt.ylabel('probability');\n plt.xticks(rotation=90);\n\n # print results\n print('Results in precision register:', precision_results_dic)\n print('QPE phase estimates:', phases_decimal)\n print('QPE eigenvalue estimates:', np.round(eigenvalues, 5))", "_____no_output_____" ] ], [ [ "### NUMERICAL TEST EXAMPLE 1", "_____no_output_____" ], [ "First, apply the QPE algorithm to the simple single-qubit unitary $U=X$, with eigenstate $|\Psi\rangle = |+\rangle = H|0\rangle$. Here, we expect to measure the phase $\varphi=0$ (giving the corresponding eigenvalue $\lambda=1$). \nWe show that this result stays the same as we increase the number of qubits $n$ for the top register. ", "_____no_output_____" ] ], [ [ "# Set total number of precision qubits: 2\nnumber_precision_qubits = 2\n\n# Define the set of precision qubits\nprecision_qubits = range(number_precision_qubits)\n\n# Define the query qubits. We'll have them start after the precision qubits\nquery_qubits = [number_precision_qubits]\n\n# State preparation for eigenstate of U=X\nquery = Circuit().h(query_qubits)\n\n# Run the test with U=X\nout = run_qpe(X, precision_qubits, query_qubits, query, device, s3_folder)\n\n# Postprocess results\npostprocess_qpe_results(out)", "Printing circuit:\nT : |0|1|2| 3 |4| 5 |6|Result Types|\n \nq0 : -H---U-SWAP---PHASE(-1.57)-H-Probability--\n | | | | \nq1 : -H-U-|-SWAP-H-C--------------Probability--\n | | | \nq2 : -H-U-U-----------------------Probability--\n\nT : |0|1|2| 3 |4| 5 |6|Result Types|\nMeasurement counts: Counter({'000': 504, '001': 496})\nResults in precision register: {'00': 1000}\nQPE phase estimates: [0.0]\nQPE eigenvalue estimates: [1.+0.j]\n" ] ], [ [ "Next, check that we get the same result for a larger precision (top) register. 
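\n\nAs a small aside, the mapping from a measured bitstring in the precision register to a phase estimate is simply a binary fraction; for instance (a quick sanity check, independent of the helper module):\n\n```python\n# an n-bit readout b encodes the phase int(b, 2) / 2**n\nfor bits in ('00', '10', '000'):\n    print(bits, '->', int(bits, 2) / 2**len(bits))  # 0.0, 0.5, 0.0\n```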
", "_____no_output_____" ] ], [ [ "# Set total number of precision qubits: 3\nnumber_precision_qubits = 3\n\n# Define the set of precision qubits\nprecision_qubits = range(number_precision_qubits)\n\n# Define the query qubits. We'll have them start after the precision qubits\nquery_qubits = [number_precision_qubits]\n\n# State preparation for eigenstate of U=X\nquery = Circuit().h(query_qubits)\n\n# Run the test with U=X\nout = run_qpe(X, precision_qubits, query_qubits, query, device, s3_folder)\n\n# Postprocess results\npostprocess_qpe_results(out)", "Printing circuit:\nT : |0|1|2|3| 4 |5| 6 | 7 | 8 |9|Result Types|\n \nq0 : -H-----U-SWAP------------------PHASE(-0.785)-PHASE(-1.57)-H-Probability--\n | | | | | \nq1 : -H---U-|-|------PHASE(-1.57)-H-|-------------C--------------Probability--\n | | | | | | \nq2 : -H-U-|-|-SWAP-H-C--------------C----------------------------Probability--\n | | | | \nq3 : -H-U-U-U----------------------------------------------------Probability--\n\nT : |0|1|2|3| 4 |5| 6 | 7 | 8 |9|Result Types|\nMeasurement counts: Counter({'0000': 504, '0001': 496})\nResults in precision register: {'000': 1000}\nQPE phase estimates: [0.0]\nQPE eigenvalue estimates: [1.+0.j]\n" ] ], [ [ "### NUMERICAL TEST EXAMPLE 2", "_____no_output_____" ], [ "Next, apply the QPE algorithm to the simple single-qubit unitary $U=X$, with eigenstate $|\\Psi\\rangle = |-\\rangle = HX|0\\rangle$. \nHere, we expect to measure the phase $\\varphi=0.5$ (giving the corresponding eigenvalue $\\lambda=-1$). ", "_____no_output_____" ] ], [ [ "# Set total number of precision qubits: 2\nnumber_precision_qubits = 2\n\n# Define the set of precision qubits\nprecision_qubits = range(number_precision_qubits)\n\n# Define the query qubits. We'll have them start after the precision qubits\nquery_qubits = [number_precision_qubits]\n\n# State preparation for eigenstate of U=X\nquery = Circuit().x(query_qubits).h(query_qubits)\n\n# Run the test with U=X\nout = run_qpe(X, precision_qubits, query_qubits, query, device, s3_folder)\n\n# Postprocess results\npostprocess_qpe_results(out)", "Printing circuit:\nT : |0|1|2|3| 4 |5| 6 |7|Result Types|\n \nq0 : -H-----U-SWAP---PHASE(-1.57)-H-Probability--\n | | | | \nq1 : -H---U-|-SWAP-H-C--------------Probability--\n | | | \nq2 : -X-H-U-U-----------------------Probability--\n\nT : |0|1|2|3| 4 |5| 6 |7|Result Types|\nMeasurement counts: Counter({'100': 516, '101': 484})\nResults in precision register: {'10': 1000}\nQPE phase estimates: [0.5]\nQPE eigenvalue estimates: [-1.+0.j]\n" ] ], [ [ "### NUMERICAL TEST EXAMPLE 3", "_____no_output_____" ], [ "Next, apply the QPE algorithm again to the simple single-qubit unitary $U=X$, but we initialize the query register in the state $|\\Psi\\rangle = |1\\rangle$ which is *not* an eigenstate of $U$. \nHere, following the previous discussion, we expect to measure the phases $\\varphi=0, 0.5$ (giving the corresponding eigenvalue $\\lambda=\\pm 1$). Accordingly, here we set ```items_to_keep=2```. ", "_____no_output_____" ] ], [ [ "# Set total number of precision qubits: 2\nnumber_precision_qubits = 2\n\n# Define the set of precision qubits\nprecision_qubits = range(number_precision_qubits)\n\n# Define the query qubits. 
We'll have them start after the precision qubits\nquery_qubits = [number_precision_qubits]\n\n# State preparation for |1>, which is not an eigenstate of U=X\nquery = Circuit().x(query_qubits)\n\n# Run the test with U=X\nout = run_qpe(X, precision_qubits, query_qubits, query, device, s3_folder, items_to_keep=2)\n\n# Postprocess results\npostprocess_qpe_results(out)", "Printing circuit:\nT : |0|1|2| 3 |4| 5 |6|Result Types|\n \nq0 : -H---U-SWAP---PHASE(-1.57)-H-Probability--\n | | | | \nq1 : -H-U-|-SWAP-H-C--------------Probability--\n | | | \nq2 : -X-U-U-----------------------Probability--\n\nT : |0|1|2| 3 |4| 5 |6|Result Types|\nMeasurement counts: Counter({'000': 261, '100': 256, '101': 242, '001': 241})\nResults in precision register: {'10': 498, '00': 502}\nQPE phase estimates: [0.0, 0.5]\nQPE eigenvalue estimates: [ 1.+0.j -1.+0.j]\n" ] ], [ [ "### NUMERICAL TEST EXAMPLE 4", "_____no_output_____" ], [ "Next, apply the QPE algorithm to the two-qubit unitary $U=X \otimes Z$, and prepare the query register in the eigenstate $|\Psi\rangle = |+\rangle \otimes |1\rangle = H|0\rangle \otimes X|0\rangle$. \nHere, we expect to measure the phase $\varphi=0.5$ (giving the corresponding eigenvalue $\lambda=-1$). ", "_____no_output_____" ] ], [ [ "# set unitary matrix U\nu1 = np.kron(X, Id) \nu2 = np.kron(Id, Z)\nunitary = np.dot(u1, u2)\nprint('Two-qubit unitary (XZ):\\n', unitary)\n\n# get example eigensystem \neig_values, eig_vectors = np.linalg.eig(unitary)\nprint('Eigenvalues:', eig_values)\n# print('Eigenvectors:', eig_vectors)", "Two-qubit unitary (XZ):\n [[ 0. 0. 1. 0.]\n [ 0. 0. 0. -1.]\n [ 1. 0. 0. 0.]\n [ 0. -1. 0. 0.]]\nEigenvalues: [ 1. -1. 1. -1.]\n" ], [ "# Set total number of precision qubits: 2\nnumber_precision_qubits = 2\n\n# Define the set of precision qubits\nprecision_qubits = range(number_precision_qubits)\n\n# Define the query qubits. We'll have them start after the precision qubits\nquery_qubits = [number_precision_qubits, number_precision_qubits+1]\n\n# State preparation for eigenstate |+,1> of U=X \otimes Z\nquery = Circuit().h(query_qubits[0]).x(query_qubits[1])\n\n# Run the test with U=XZ\nout = run_qpe(unitary, precision_qubits, query_qubits, query, device, s3_folder)\n\n# Postprocess results\npostprocess_qpe_results(out)", "Printing circuit:\nT : |0|1|2| 3 |4| 5 |6|Result Types|\n \nq0 : -H---U-SWAP---PHASE(-1.57)-H-Probability--\n | | | | \nq1 : -H-U-|-SWAP-H-C--------------Probability--\n | | | \nq2 : -H-U-U-----------------------Probability--\n | | | \nq3 : -X-U-U-----------------------Probability--\n\nT : |0|1|2| 3 |4| 5 |6|Result Types|\nMeasurement counts: Counter({'1011': 503, '1001': 497})\nResults in precision register: {'10': 1000}\nQPE phase estimates: [0.5]\nQPE eigenvalue estimates: [-1.+0.j]\n" ] ], [ [ "### NUMERICAL TEST EXAMPLE 5", "_____no_output_____" ], [ "In this example, we choose the unitary to be a _random_ two-qubit unitary, diagonal in the computational basis. 
We initialize the query register to be in the eigenstate $|11\\rangle$ of $U$, which we can prepare using that $|11\\rangle = X\\otimes X|00\\rangle$.\nIn this case we should be able to read off the eigenvalue and phase from $U$ and verify that QPE gives the right answer.", "_____no_output_____" ] ], [ [ "# Generate a random 2 qubit unitary matrix:\nfrom scipy.stats import unitary_group\n\n# Fix random seed for reproducibility\nnp.random.seed(seed=42)\n\n# Get random two-qubit unitary\nrandom_unitary = unitary_group.rvs(2**2)\n\n# Let's diagonalize this\nevals = np.linalg.eig(random_unitary)[0]\n\n# Since we want to be able to read off the eigenvalues of the unitary in question\n# let's choose our unitary to be diagonal in this basis\nunitary = np.diag(evals)\n\n# Check that this is indeed unitary, and print it out:\nprint('Two-qubit random unitary:\\n', np.round(unitary, 3))\nprint('Check for unitarity: ', np.allclose(np.eye(len(unitary)), unitary.dot(unitary.T.conj())))\n\n# Print eigenvalues\nprint('Eigenvalues:', np.round(evals, 3))", "Two-qubit random unitary:\n [[-0.078+0.997j 0. +0.j 0. +0.j 0. +0.j ]\n [ 0. +0.j -0.987-0.159j 0. +0.j 0. +0.j ]\n [ 0. +0.j 0. +0.j 0.192-0.981j 0. +0.j ]\n [ 0. +0.j 0. +0.j 0. +0.j 0.747-0.665j]]\nCheck for unitarity: True\nEigenvalues: [-0.078+0.997j -0.987-0.159j 0.192-0.981j 0.747-0.665j]\n" ] ], [ [ "When we execute the QPE circuit, we expect the following (approximate) result for the eigenvalue estimate: ", "_____no_output_____" ] ], [ [ "print('Target eigenvalue:', np.round(evals[-1], 3))", "Target eigenvalue: (0.747-0.665j)\n" ], [ "# Set total number of precision qubits\nnumber_precision_qubits = 3\n\n# Define the set of precision qubits\nprecision_qubits = range(number_precision_qubits)\n\n# Define the query qubits. We'll have them start after the precision qubits\nquery_qubits = [number_precision_qubits, number_precision_qubits+1]\n\n# State preparation for eigenstate |1,1> of diagonal U\nquery = Circuit().x(query_qubits[0]).x(query_qubits[1])\n\n# Run the test with U=X\nout = run_qpe(unitary, precision_qubits, query_qubits, query, device, s3_folder)\n\n# Postprocess results\npostprocess_qpe_results(out)\n\n# compare output to exact target values\nprint('Target eigenvalue:', np.round(evals[-1], 3))", "Printing circuit:\nT : |0|1|2|3| 4 |5| 6 | 7 | 8 |9|Result Types|\n \nq0 : -H-----U-SWAP------------------PHASE(-0.785)-PHASE(-1.57)-H-Probability--\n | | | | | \nq1 : -H---U-|-|------PHASE(-1.57)-H-|-------------C--------------Probability--\n | | | | | | \nq2 : -H-U-|-|-SWAP-H-C--------------C----------------------------Probability--\n | | | | \nq3 : -X-U-U-U----------------------------------------------------Probability--\n | | | | \nq4 : -X-U-U-U----------------------------------------------------Probability--\n\nT : |0|1|2|3| 4 |5| 6 | 7 | 8 |9|Result Types|\nMeasurement counts: Counter({'11111': 986, '11011': 8, '00011': 3, '01011': 1, '10011': 1, '10111': 1})\nResults in precision register: {'100': 1, '101': 1, '111': 986, '000': 3, '010': 1, '110': 8}\nQPE phase estimates: [0.875]\nQPE eigenvalue estimates: [0.70711-0.70711j]\nTarget eigenvalue: (0.747-0.665j)\n" ] ], [ [ "We can easily improve the precision of our parameter estimate by increasing the number of qubits in the precision register, as shown in the following example. 
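\n\nWith $n$ precision qubits the estimate is restricted to the grid $k/2^{n}$, so the best achievable resolution scales as $2^{-n}$. As a rough classical check (a small illustrative calculation assuming the target phase $\varphi \approx 0.884$ of the eigenvalue above, not part of the helper module):\n\n```python\nimport numpy as np\n\nphi = np.angle(0.747 - 0.665j) / (2 * np.pi) % 1  # target phase in [0, 1)\nfor n in (3, 10):\n    est = round(phi * 2**n) / 2**n  # closest n-bit grid point\n    print(n, est, abs(est - phi))\n```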
", "_____no_output_____" ] ], [ [ "# Set total number of precision qubits\nnumber_precision_qubits = 10\n\n# Define the set of precision qubits\nprecision_qubits = range(number_precision_qubits)\n\n# Define the query qubits. We'll have them start after the precision qubits\nquery_qubits = [number_precision_qubits, number_precision_qubits+1]\n\n# State preparation for eigenstate |1,1> of diagonal U\nquery = Circuit().x(query_qubits[0]).x(query_qubits[1])\n\n# Run the test with U=X\nout = run_qpe(unitary, precision_qubits, query_qubits, query, device, s3_folder)\n\n# Postprocess results\neigenvalues = out['eigenvalues']\nprint('QPE eigenvalue estimates:', np.round(eigenvalues, 5))\n\n# compare output to exact target values\nprint('Target eigenvalue:', np.round(evals[-1], 5))", "QPE eigenvalue estimates: [0.74506-0.667j]\nTarget eigenvalue: (0.74699-0.66484j)\n" ] ], [ [ "---\n## APPENDIX", "_____no_output_____" ] ], [ [ "# Check SDK version\n# alternative: braket.__version__\n!pip show amazon-braket-sdk | grep Version", "Version: 0.6.0\r\n" ] ], [ [ "## Details of the ```utiles_qpe.py``` module", "_____no_output_____" ], [ "### Imports, including inverse QFT", "_____no_output_____" ], [ "```python\n# general imports\nimport numpy as np\nimport math\nfrom collections import Counter\nfrom datetime import datetime\nimport pickle\n\n# AWS imports: Import Braket SDK modules\nfrom braket.circuits import Circuit, circuit\n\n# local imports\nfrom utils_qft import inverse_qft\n```", "_____no_output_____" ], [ "### QPE Subroutine", "_____no_output_____" ], [ "```python\[email protected](register=True)\ndef controlled_unitary(control, target_qubits, unitary):\n \"\"\"\n Construct a circuit object corresponding to the controlled unitary\n\n Args:\n control: The qubit on which to control the gate\n\n target_qubits: List of qubits on which the unitary U acts\n\n unitary: matrix representation of the unitary we wish to implement in a controlled way\n \"\"\"\n\n # Define projectors onto the computational basis\n p0 = np.array([[1., 0.],\n [0., 0.]])\n\n p1 = np.array([[0., 0.],\n [0., 1.]])\n\n # Instantiate circuit object\n circ = Circuit()\n\n # Construct numpy matrix\n id_matrix = np.eye(len(unitary))\n controlled_matrix = np.kron(p0, id_matrix) + np.kron(p1, unitary)\n\n # Set all target qubits\n targets = [control] + target_qubits\n\n # Add controlled unitary\n circ.unitary(matrix=controlled_matrix, targets=targets)\n\n return circ\n\n\[email protected](register=True)\ndef qpe(precision_qubits, query_qubits, unitary, control_unitary=True):\n \"\"\"\n Function to implement the QPE algorithm using two registers for precision (read-out) and query.\n Register qubits need not be contiguous.\n\n Args:\n precision_qubits: list of qubits defining the precision register\n\n query_qubits: list of qubits defining the query register\n\n unitary: Matrix representation of the unitary whose eigenvalues we wish to estimate\n\n control_unitary: Optional boolean flag for controlled unitaries,\n with C-(U^{2^k}) by default (default is True),\n or C-U controlled-unitary (2**power) times\n \"\"\"\n qpe_circ = Circuit()\n\n # Get number of qubits\n num_precision_qubits = len(precision_qubits)\n num_query_qubits = len(query_qubits)\n\n # Apply Hadamard across precision register\n qpe_circ.h(precision_qubits)\n\n # Apply controlled unitaries. 
Start with the last precision_qubit, and end with the first\n for ii, qubit in enumerate(reversed(precision_qubits)):\n # Set power exponent for unitary\n power = ii\n\n # Alternative 1: Implement C-(U^{2^k})\n if control_unitary:\n # Define the matrix U^{2^k}\n Uexp = np.linalg.matrix_power(unitary,2**power)\n\n # Apply the controlled unitary C-(U^{2^k})\n qpe_circ.controlled_unitary(qubit, query_qubits, Uexp)\n # Alternative 2: One can instead apply controlled-unitary (2**power) times to get C-U^{2^power}\n else:\n for _ in range(2**power):\n qpe_circ.controlled_unitary(qubit, query_qubits, unitary)\n\n # Apply inverse qft to the precision_qubits\n qpe_circ.inverse_qft(precision_qubits)\n\n return qpe_circ\n```", "_____no_output_____" ], [ "### QPE postprocessing helper functions", "_____no_output_____" ], [ "```python\n# helper function to remove query bits from bitstrings\ndef substring(key, precision_qubits):\n \"\"\"\n Helper function to get substring from keys for dedicated string positions as given by precision_qubits.\n This function is necessary to allow for arbitrary qubit mappings in the precision and query registers\n (that is, so that the register qubits need not be contiguous).\n\n Args:\n key: string from which we want to extract the substring supported only on the precision qubits\n\n precision_qubits: List of qubits corresponding to precision_qubits.\n Currently assumed to be a list of integers corresponding to the indices of the qubits\n \"\"\"\n short_key = ''\n for idx in precision_qubits:\n short_key = short_key + key[idx]\n\n return short_key\n\n\n# helper function to convert a binary fraction to decimal\n# reference: https://www.geeksforgeeks.org/convert-binary-fraction-decimal/\ndef binaryToDecimal(binary):\n \"\"\"\n Helper function to convert binary string (example: '01001') to decimal\n\n Args:\n binary: string to convert to decimal fraction\n \"\"\"\n\n length = len(binary)\n fracDecimal = 0\n\n # Convert fractional part of binary to decimal equivalent\n twos = 2\n\n for ii in range(length):\n fracDecimal += ((ord(binary[ii]) - ord('0')) / twos)\n twos *= 2.0\n\n # return fractional part\n return fracDecimal\n\n\n# helper function for postprocessing based on measurement shots\ndef get_qpe_phases(measurement_counts, precision_qubits, items_to_keep=1):\n \"\"\"\n Get QPE phase estimate from measurement_counts for given number of precision qubits\n\n Args:\n measurement_counts: measurement results from a device run\n\n precision_qubits: List of qubits corresponding to precision_qubits.\n Currently assumed to be a list of integers corresponding to the indices of the qubits\n\n items_to_keep: number of items to return (topmost measurement counts for precision register)\n \"\"\"\n\n # Aggregate the results (that is, ignore the query register qubits):\n\n # First get bitstrings with corresponding counts for precision qubits only\n bitstrings_precision_register = [substring(key, precision_qubits) for key in measurement_counts.keys()]\n # Then keep only the unique strings\n bitstrings_precision_register_set = set(bitstrings_precision_register)\n # Cast as a list for later use\n bitstrings_precision_register_list = list(bitstrings_precision_register_set)\n\n # Now create a new dict to collect measurement results on the precision_qubits.\n # Keys are given by the measurement count substrings on the register qubits. 
Initialize the counts to zero.\n precision_results_dic = {key: 0 for key in bitstrings_precision_register_list}\n\n # Loop over all measurement outcomes\n for key in measurement_counts.keys():\n # Save the measurement count for this outcome\n counts = measurement_counts[key]\n # Generate the corresponding shortened key (supported only on the precision_qubits register)\n count_key = substring(key, precision_qubits)\n # Add these measurement counts to the corresponding key in our new dict\n precision_results_dic[count_key] += counts\n\n # Get topmost values only\n c = Counter(precision_results_dic)\n topmost= c.most_common(items_to_keep)\n # get decimal phases from bitstrings for topmost bitstrings\n phases_decimal = [binaryToDecimal(item[0]) for item in topmost]\n\n # Get decimal phases from bitstrings for all bitstrings\n # number_precision_qubits = len(precision_qubits)\n # Generate binary decimal expansion\n # phases_decimal = [int(key, 2)/(2**number_precision_qubits) for key in precision_results_dic]\n # phases_decimal = [binaryToDecimal(key) for key in precision_results_dic]\n\n return phases_decimal, precision_results_dic\n```", "_____no_output_____" ], [ "### Run QPE experiments:", "_____no_output_____" ], [ "```python\ndef run_qpe(unitary, precision_qubits, query_qubits, query_circuit,\n device, s3_folder, items_to_keep=1, shots=1000, save_to_pck=False):\n \"\"\"\n Function to run QPE algorithm end-to-end and return measurement counts.\n\n Args:\n precision_qubits: list of qubits defining the precision register\n\n query_qubits: list of qubits defining the query register\n\n unitary: Matrix representation of the unitary whose eigenvalues we wish to estimate\n\n query_circuit: query circuit for state preparation of query register\n\n items_to_keep: (optional) number of items to return (topmost measurement counts for precision register)\n\n device: Braket device backend\n\n shots: (optional) number of measurement shots (default is 1000)\n\n save_to_pck: (optional) save results to pickle file if True (default is False)\n \"\"\"\n\n # get size of precision register and total number of qubits\n number_precision_qubits = len(precision_qubits)\n num_qubits = len(precision_qubits) + len(query_qubits)\n\n # Define the circuit. 
Start from the query_circuit that was passed in (note that it is extended in place rather than copied), then add the QPE:\n circ = query_circuit\n circ.qpe(precision_qubits, query_qubits, unitary)\n\n # Add desired results_types\n circ.probability()\n\n # Run the circuit with all zeros input.\n # The query_circuit subcircuit generates the desired input from all zeros.\n # The following code executes the correct device.run call, depending on whether the backend is local or managed (cloud-based)\n if device.name == 'DefaultSimulator':\n task = device.run(circ, shots=shots)\n else:\n task = device.run(circ, s3_folder, shots=shots)\n\n # get result for this task\n result = task.result()\n\n # get metadata\n metadata = result.task_metadata\n\n # get output probabilities (see result_types above)\n probs_values = result.values[0]\n\n # get measurement results\n measurements = result.measurements\n measured_qubits = result.measured_qubits\n measurement_counts = result.measurement_counts\n measurement_probabilities = result.measurement_probabilities\n\n # bitstrings\n format_bitstring = '{0:0' + str(num_qubits) + 'b}'\n bitstring_keys = [format_bitstring.format(ii) for ii in range(2**num_qubits)]\n\n # QPE postprocessing\n phases_decimal, precision_results_dic = get_qpe_phases(measurement_counts, precision_qubits, items_to_keep)\n eigenvalues = [np.exp(2*np.pi*1j*phase) for phase in phases_decimal]\n\n # aggregate results\n out = {'circuit': circ,\n 'task_metadata': metadata,\n 'measurements': measurements,\n 'measured_qubits': measured_qubits,\n 'measurement_counts': measurement_counts,\n 'measurement_probabilities': measurement_probabilities,\n 'probs_values': probs_values,\n 'bitstring_keys': bitstring_keys,\n 'precision_results_dic': precision_results_dic,\n 'phases_decimal': phases_decimal,\n 'eigenvalues': eigenvalues}\n\n if save_to_pck:\n # store results: dump output to pickle with timestamp in filename\n time_now = datetime.strftime(datetime.now(), '%Y%m%d%H%M%S')\n results_file = 'results-'+time_now+'.pck'\n pickle.dump(out, open(results_file, \"wb\"))\n # you can load results as follows\n # out = pickle.load(open(results_file, \"rb\"))\n\n return out\n```", "_____no_output_____" ], [ "---\n## REFERENCES\n\n[1] Wikipedia: https://en.wikipedia.org/wiki/Quantum_phase_estimation_algorithm\n\n[2] Nielsen, Michael A., Chuang, Isaac L. (2010). Quantum Computation and Quantum Information (2nd ed.). Cambridge: Cambridge University Press.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
4a799e5d39a181b6110fdb8482c822a62f5eba6f
62,516
ipynb
Jupyter Notebook
NoteBooks/Curso de WebScraping/Unificado/web-scraping-master/Clases/Módulo 4_ APIs/Clases no script/M4C5 - Fin del proyecto + Bonus.ipynb
Alejandro-sin/Learning_Notebooks
161d6bed4c7b1d171b45f61c0cc6fa91e9894aad
[ "MIT" ]
1
2021-02-26T13:12:22.000Z
2021-02-26T13:12:22.000Z
NoteBooks/Curso de WebScraping/Unificado/web-scraping-master/Clases/Módulo 4_ APIs/Clases no script/M4C5 - Fin del proyecto + Bonus.ipynb
Alejandro-sin/Learning_Notebooks
161d6bed4c7b1d171b45f61c0cc6fa91e9894aad
[ "MIT" ]
null
null
null
NoteBooks/Curso de WebScraping/Unificado/web-scraping-master/Clases/Módulo 4_ APIs/Clases no script/M4C5 - Fin del proyecto + Bonus.ipynb
Alejandro-sin/Learning_Notebooks
161d6bed4c7b1d171b45f61c0cc6fa91e9894aad
[ "MIT" ]
null
null
null
39.567089
581
0.523658
[ [ [ "# Módulo 4: APIs\n## Spotify\n<img src=\"https://developer.spotify.com/assets/branding-guidelines/[email protected]\" width=400></img>\n\nEn este módulo utilizaremos APIs para obtener información sobre artistas, discos y tracks disponibles en Spotify. Pero primero.. ¿Qué es una **API**?<br>\nPor sus siglas en inglés, una API es una interfaz para programar aplicaciones (*Application Programming Interface*). Es decir que es un conjunto de funciones, métodos, reglas y definiciones que nos permitirán desarrollar aplicaciones (en este caso un scraper) que se comuniquen con los servidores de Spotify. Las APIs son diseñadas y desarrolladas por las empresas que tienen interés en que se desarrollen aplicaciones (públicas o privadas) que utilicen sus servicios. Spotify tiene APIs públicas y bien documentadas que estaremos usando en el desarrollo de este proyecto.\n#### REST\nUn término se seguramente te vas a encontrar cuando estés buscando información en internet es **REST** o *RESTful*. Significa *representational state transfer* y si una API es REST o RESTful, implica que respeta unos determinados principios de arquitectura, como por ejemplo un protocolo de comunicación cliente/servidor (que será HTTP) y (entre otras cosas) un conjunto de operaciones definidas que conocemos como **métodos**. Ya veníamos usando el método GET para hacer solicitudes a servidores web.\n#### Documentación\nComo mencioné antes, las APIs son diseñadas por las mismas empresas que tienen interés en que se desarrollen aplicaciones (públicas o privadas) que consuman sus servicios o información. Es por eso que la forma de utilizar las APIs variará dependiendo del servicio que querramos consumir. No es lo mismo utilizar las APIs de Spotify que las APIs de Twitter. Por esta razón es de suma importancia leer la documentación disponible, generalmente en la sección de desarrolladores de cada sitio. Te dejo el [link a la de Spotify](https://developer.spotify.com/documentation/)\n#### JSON\nJson significa *JavaScript Object Notation* y es un formato para describir objetos que ganó tanta popularidad en su uso que ahora se lo considera independiente del lenguaje. De hecho, lo utilizaremos en este proyecto por más que estemos trabajando en Python, porque es la forma en la que obtendremos las respuestas a las solicitudes que realicemos utilizando las APIs. 
", "_____no_output_____" ], [ "Useful links for the class:\n- [Spotify documentation - Artists](https://developer.spotify.com/documentation/web-api/reference/artists/)\n- [Iron Maiden on Spotify](https://open.spotify.com/artist/6mdiAmATAx73kdxrNrnlao)", "_____no_output_____" ] ], [ [ "import requests", "_____no_output_____" ], [ "id_im = '6mdiAmATAx73kdxrNrnlao'", "_____no_output_____" ], [ "url_base = 'https://api.spotify.com/v1'", "_____no_output_____" ], [ "ep_artist = '/artists/{artist_id}'", "_____no_output_____" ], [ "url_base+ep_artist.format(artist_id=id_im)", "_____no_output_____" ], [ "r = requests.get(url_base+ep_artist.format(artist_id=id_im))", "_____no_output_____" ], [ "r.status_code", "_____no_output_____" ], [ "r.json()", "_____no_output_____" ], [ "token_url = 'https://accounts.spotify.com/api/token'", "_____no_output_____" ], [ "params = {'grant_type': 'client_credentials'}", "_____no_output_____" ], [ "headers = {'Authorization': 'Basic NDRiN2IzNmVjMTQ1NDY3ZjlhOWVlYWY3ZTQxN2NmOGI6N2I0YWE3YTBlZjQ4NDQwNDhhYjFkMjI0MzBhMWViMWY='}", "_____no_output_____" ], [ "r = requests.post(token_url, data=params, headers=headers)", "_____no_output_____" ], [ "r.status_code", "_____no_output_____" ], [ "r.json()", "_____no_output_____" ], [ "token = r.json()['access_token']\ntoken", "_____no_output_____" ], [ "header = {\"Authorization\": \"Bearer {}\".format(token)}", "_____no_output_____" ], [ "r = requests.get(url_base+ep_artist.format(artist_id=id_im), headers=header)", "_____no_output_____" ], [ "r.status_code", "_____no_output_____" ], [ "r.json()", "_____no_output_____" ], [ "url_busqueda = 'https://api.spotify.com/v1/search'", "_____no_output_____" ], [ "search_params = {'q': \"Iron+Maiden\", 'type':'artist', 'market':'AR'}", "_____no_output_____" ], [ "busqueda = requests.get(url_busqueda, headers=header, params=search_params)", "_____no_output_____" ], [ "busqueda.status_code", "_____no_output_____" ], [ "busqueda.json()", "_____no_output_____" ], [ "import pandas as pd", "_____no_output_____" ], [ "df = pd.DataFrame(busqueda.json()['artists']['items'])\ndf.head()", "_____no_output_____" ], [ "df.sort_values(by='popularity', ascending=False).iloc[0]['id']", "_____no_output_____" ], [ "import base64\ndef get_token(client_id, client_secret):\n encoded = base64.b64encode(bytes(client_id+':'+client_secret, 'utf-8'))\n params = {'grant_type':'client_credentials'}\n header={'Authorization': 'Basic ' + str(encoded, 'utf-8')}\n r = requests.post('https://accounts.spotify.com/api/token', headers=header, data=params)\n if r.status_code != 200:\n print('Error en la request.', r.json())\n return None\n print('Token válido por {} segundos.'.format(r.json()['expires_in']))\n return r.json()['access_token']", "_____no_output_____" ], [ "client_id = '44b7b36ec145467f9a9eeaf7e417cf8b'\nclient_secret = '7b4aa7a0ef4844048ab1d22430a1eb1f'", "_____no_output_____" ], [ "token = get_token(client_id, client_secret)", "Token válido por 3600 segundos.\n" ], [ "header = {\"Authorization\": \"Bearer {}\".format(token)}", "_____no_output_____" ], [ "id_im", "_____no_output_____" ], [ "artist_im = requests.get(url_base+ep_artist.format(artist_id=id_im), headers=header)\nartist_im.status_code", "_____no_output_____" ], [ "artist_im.json()", "_____no_output_____" ], [ "params = {'country': 'AR'}", "_____no_output_____" ], [ "albums_im = 
requests.get(url_base+ep_artist.format(artist_id=id_im)+'/albums', headers=header, params=params)\nalbums_im.status_code", "_____no_output_____" ], [ "albums_im.json()['items']", "_____no_output_____" ], [ "[(album['id'], album['name']) for album in albums_im.json()['items']]", "_____no_output_____" ], [ "bnw_id = '1hDF0QPIHVTnSJtxyQVguB'", "_____no_output_____" ], [ "album_ep = '/albums/{album_id}'", "_____no_output_____" ], [ "album_params = {'market':'AR'}", "_____no_output_____" ], [ "bnw = requests.get(url_base+album_ep.format(album_id=bnw_id)+'/tracks', headers=header, params=album_params)\nbnw", "_____no_output_____" ], [ "bnw.json()", "_____no_output_____" ], [ "bnw.json()['items']", "_____no_output_____" ], [ "[(track['id'], track['name']) for track in bnw.json()['items']]", "_____no_output_____" ] ], [ [ "## Class 5", "_____no_output_____" ] ], [ [ "def obtener_discografia(artist_id, token, return_name=False, page_limit=50, country=None):\n url = f'https://api.spotify.com/v1/artists/{artist_id}/albums'\n header = {'Authorization': f'Bearer {token}'}\n params = {'limit': page_limit, \n 'offset': 0,\n 'country': country}\n lista = []\n r = requests.get(url, params=params, headers=header)\n \n if r.status_code != 200:\n print('Error en request.', r.json())\n return None\n \n if return_name:\n lista += [(item['id'], item['name']) for item in r.json()['items']]\n else:\n lista += [item['id'] for item in r.json()['items']]\n \n while r.json()['next']:\n r = requests.get(r.json()['next'], headers=header) # The rest of the parameters are already inside the URL\n if return_name:\n lista += [(item['id'], item['name']) for item in r.json()['items']]\n else:\n lista += [item['id'] for item in r.json()['items']]\n \n return lista", "_____no_output_____" ], [ "def obtener_tracks(album_id, token, return_name=False, page_limit=50, market=None):\n url=f'https://api.spotify.com/v1/albums/{album_id}/tracks'\n header = {'Authorization': f'Bearer {token}'}\n params = {'limit': page_limit, \n 'offset': 0,\n 'market': market}\n lista = []\n r = requests.get(url, params=params, headers=header)\n \n if r.status_code != 200:\n print('Error en request.', r.json())\n return None\n \n if return_name:\n lista += [(item['id'], item['name']) for item in r.json()['items']]\n else:\n lista += [item['id'] for item in r.json()['items']]\n \n while r.json()['next']:\n r = requests.get(r.json()['next'], headers=header) # The rest of the parameters are already inside the URL\n if return_name:\n lista += [(item['id'], item['name']) for item in r.json()['items']]\n else:\n lista += [item['id'] for item in r.json()['items']]\n \n return lista", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
4a799ef4dabe11c252bf3467c68a179077b3793c
21,429
ipynb
Jupyter Notebook
TreesEnsembles_part3.ipynb
univai-ghf/TreesAndEnsembleWorkshop
4aee6ac11f52bdc0c0a0f8df03e649d3ceac61ba
[ "MIT" ]
null
null
null
TreesEnsembles_part3.ipynb
univai-ghf/TreesAndEnsembleWorkshop
4aee6ac11f52bdc0c0a0f8df03e649d3ceac61ba
[ "MIT" ]
null
null
null
TreesEnsembles_part3.ipynb
univai-ghf/TreesAndEnsembleWorkshop
4aee6ac11f52bdc0c0a0f8df03e649d3ceac61ba
[ "MIT" ]
null
null
null
35.478477
897
0.49181
[ [ [ "# Data Inputs and Display Libraries", "_____no_output_____" ] ], [ [ "\nimport pandas as pd\nimport numpy as np\nimport pickle\npd.set_option('display.float_format', lambda x: '%.5f' % x)\nfrom IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = 'all'", "_____no_output_____" ] ], [ [ "# Modeling Libraries", "_____no_output_____" ] ], [ [ "from sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn import model_selection\nfrom xgboost import XGBClassifier\nimport pickle\nfrom sklearn.model_selection import GridSearchCV", "_____no_output_____" ] ], [ [ "# Metrics Libraries\n", "_____no_output_____" ] ], [ [ "from sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix,ConfusionMatrixDisplay\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import auc\nfrom sklearn.metrics import f1_score\nfrom matplotlib import pyplot\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import average_precision_score\nfrom sklearn.metrics import precision_recall_curve", "_____no_output_____" ], [ "# Accessing the data\n!wget \"https://github.com/univai-ghf/ghfmedia/raw/main/data/Trees_and_Ensembles/datasets.rar\" \n!wget \"https://github.com/univai-ghf/ghfmedia/raw/main/data/Trees_and_Ensembles/prep_file.rar\" \n!wget \"https://github.com/univai-ghf/ghfmedia/raw/main/data/Trees_and_Ensembles/num_cols.csv\" \n!wget \"https://github.com/univai-ghf/ghfmedia/raw/main/data/Trees_and_Ensembles/str_cols.csv\" \n\n#unziping the rar\n!unrar x './datasets.rar'\n!unrar x './prep_file.rar'", "--2022-03-10 07:17:59-- https://github.com/univai-ghf/ghfmedia/raw/main/data/Trees_and_Ensembles/datasets.rar\nResolving github.com (github.com)... 13.114.40.48\nConnecting to github.com (github.com)|13.114.40.48|:443... connected.\nHTTP request sent, awaiting response... 302 Found\nLocation: https://raw.githubusercontent.com/univai-ghf/ghfmedia/main/data/Trees_and_Ensembles/datasets.rar [following]\n--2022-03-10 07:17:59-- https://raw.githubusercontent.com/univai-ghf/ghfmedia/main/data/Trees_and_Ensembles/datasets.rar\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.111.133, 185.199.110.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 3508143 (3.3M) [application/octet-stream]\nSaving to: ‘datasets.rar’\n\ndatasets.rar 100%[===================>] 3.34M --.-KB/s in 0.08s \n\n2022-03-10 07:18:00 (42.7 MB/s) - ‘datasets.rar’ saved [3508143/3508143]\n\n--2022-03-10 07:18:00-- https://github.com/univai-ghf/ghfmedia/raw/main/data/Trees_and_Ensembles/prep_file.rar\nResolving github.com (github.com)... 13.114.40.48\nConnecting to github.com (github.com)|13.114.40.48|:443... connected.\nHTTP request sent, awaiting response... 302 Found\nLocation: https://raw.githubusercontent.com/univai-ghf/ghfmedia/main/data/Trees_and_Ensembles/prep_file.rar [following]\n--2022-03-10 07:18:01-- https://raw.githubusercontent.com/univai-ghf/ghfmedia/main/data/Trees_and_Ensembles/prep_file.rar\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.111.133, 185.199.110.133, 185.199.109.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.111.133|:443... connected.\nHTTP request sent, awaiting response... 
200 OK\nLength: 954088 (932K) [application/octet-stream]\nSaving to: ‘prep_file.rar’\n\nprep_file.rar 100%[===================>] 931.73K --.-KB/s in 0.06s \n\n2022-03-10 07:18:01 (16.1 MB/s) - ‘prep_file.rar’ saved [954088/954088]\n\n--2022-03-10 07:18:01-- https://github.com/univai-ghf/ghfmedia/raw/main/data/Trees_and_Ensembles/num_cols.csv\nResolving github.com (github.com)... 13.114.40.48\nConnecting to github.com (github.com)|13.114.40.48|:443... connected.\nHTTP request sent, awaiting response... 302 Found\nLocation: https://raw.githubusercontent.com/univai-ghf/ghfmedia/main/data/Trees_and_Ensembles/num_cols.csv [following]\n--2022-03-10 07:18:02-- https://raw.githubusercontent.com/univai-ghf/ghfmedia/main/data/Trees_and_Ensembles/num_cols.csv\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.111.133, 185.199.110.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 95 [text/plain]\nSaving to: ‘num_cols.csv’\n\nnum_cols.csv 100%[===================>] 95 --.-KB/s in 0s \n\n2022-03-10 07:18:02 (5.21 MB/s) - ‘num_cols.csv’ saved [95/95]\n\n--2022-03-10 07:18:02-- https://github.com/univai-ghf/ghfmedia/raw/main/data/Trees_and_Ensembles/str_cols.csv\nResolving github.com (github.com)... 52.192.72.89\nConnecting to github.com (github.com)|52.192.72.89|:443... connected.\nHTTP request sent, awaiting response... 302 Found\nLocation: https://raw.githubusercontent.com/univai-ghf/ghfmedia/main/data/Trees_and_Ensembles/str_cols.csv [following]\n--2022-03-10 07:18:02-- https://raw.githubusercontent.com/univai-ghf/ghfmedia/main/data/Trees_and_Ensembles/str_cols.csv\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected.\nHTTP request sent, awaiting response... 
200 OK\nLength: 82 [text/plain]\nSaving to: ‘str_cols.csv’\n\nstr_cols.csv 100%[===================>] 82 --.-KB/s in 0s \n\n2022-03-10 07:18:03 (5.42 MB/s) - ‘str_cols.csv’ saved [82/82]\n\n\nUNRAR 5.50 freeware Copyright (c) 1993-2017 Alexander Roshal\n\n\nExtracting from /content/datasets.rar\n\nExtracting y_train1 \b\b\b\b 0%\b\b\b\b\b OK \nExtracting df_all_test2 \b\b\b\b 1%\b\b\b\b 2%\b\b\b\b 3%\b\b\b\b 4%\b\b\b\b 5%\b\b\b\b 6%\b\b\b\b 7%\b\b\b\b 8%\b\b\b\b 9%\b\b\b\b 10%\b\b\b\b 11%\b\b\b\b 12%\b\b\b\b 13%\b\b\b\b 14%\b\b\b\b 15%\b\b\b\b 16%\b\b\b\b 17%\b\b\b\b 18%\b\b\b\b 19%\b\b\b\b 20%\b\b\b\b 21%\b\b\b\b 22%\b\b\b\b 23%\b\b\b\b 24%\b\b\b\b 25%\b\b\b\b 26%\b\b\b\b 27%\b\b\b\b 28%\b\b\b\b 29%\b\b\b\b 30%\b\b\b\b 31%\b\b\b\b 32%\b\b\b\b 33%\b\b\b\b\b OK \nExtracting df_all_train2 \b\b\b\b 34%\b\b\b\b 35%\b\b\b\b 36%\b\b\b\b 37%\b\b\b\b 38%\b\b\b\b 39%\b\b\b\b 40%\b\b\b\b 41%\b\b\b\b 42%\b\b\b\b 43%\b\b\b\b 44%\b\b\b\b 45%\b\b\b\b 46%\b\b\b\b 47%\b\b\b\b 48%\b\b\b\b 49%\b\b\b\b 50%\b\b\b\b 51%\b\b\b\b 52%\b\b\b\b 53%\b\b\b\b 54%\b\b\b\b 55%\b\b\b\b 56%\b\b\b\b 57%\b\b\b\b 58%\b\b\b\b 59%\b\b\b\b 60%\b\b\b\b 61%\b\b\b\b 62%\b\b\b\b 63%\b\b\b\b 64%\b\b\b\b 65%\b\b\b\b 66%\b\b\b\b 67%\b\b\b\b 68%\b\b\b\b 69%\b\b\b\b 70%\b\b\b\b 71%\b\b\b\b 72%\b\b\b\b 73%\b\b\b\b 74%\b\b\b\b 75%\b\b\b\b 76%\b\b\b\b 77%\b\b\b\b 78%\b\b\b\b 79%\b\b\b\b 80%\b\b\b\b 81%\b\b\b\b 82%\b\b\b\b 83%\b\b\b\b 84%\b\b\b\b 85%\b\b\b\b 86%\b\b\b\b 87%\b\b\b\b 88%\b\b\b\b 89%\b\b\b\b 90%\b\b\b\b 91%\b\b\b\b 92%\b\b\b\b 93%\b\b\b\b 94%\b\b\b\b 95%\b\b\b\b 96%\b\b\b\b 97%\b\b\b\b 98%\b\b\b\b 99%\b\b\b\b\b OK \nExtracting sel_cols \b\b\b\b 99%\b\b\b\b\b OK \nExtracting y_test1 \b\b\b\b 99%\b\b\b\b\b OK \nAll OK\n\nUNRAR 5.50 freeware Copyright (c) 1993-2017 Alexander Roshal\n\n\nExtracting from /content/prep_file.rar\n\nExtracting prep_file.csv \b\b\b\b 3%\b\b\b\b 6%\b\b\b\b 10%\b\b\b\b 13%\b\b\b\b 17%\b\b\b\b 20%\b\b\b\b 24%\b\b\b\b 27%\b\b\b\b 30%\b\b\b\b 34%\b\b\b\b 37%\b\b\b\b 41%\b\b\b\b 44%\b\b\b\b 48%\b\b\b\b 51%\b\b\b\b 54%\b\b\b\b 58%\b\b\b\b 61%\b\b\b\b 65%\b\b\b\b 68%\b\b\b\b 72%\b\b\b\b 75%\b\b\b\b 78%\b\b\b\b 82%\b\b\b\b 85%\b\b\b\b 89%\b\b\b\b 92%\b\b\b\b 96%\b\b\b\b 99%\b\b\b\b\b OK \nAll OK\n" ], [ "\ndef pick_in(obj_name):\n fl_out1 = obj_name\n pickle_in = open(fl_out1,\"rb\")\n mod1= pickle.load(pickle_in)\n \n return mod1", "_____no_output_____" ], [ "list_objs = [\"df_all_train2\",\"y_train1\",\"df_all_test2\",\"y_test1\"]\n\nfor i in list_objs:\n globals()[i]= pick_in(i)", "_____no_output_____" ], [ "def auc1_scr(mod1,test_set,actual1):\n \n mod = eval(mod1)\n pred1=mod.predict_proba(test_set)[:,1]\n fpr, tpr, thresholds = roc_curve(actual1, pred1)\n auc1 = auc(fpr, tpr)\n \n return auc1", "_____no_output_____" ], [ "# AdaBoost Classifier\n\nab = AdaBoostClassifier(n_estimators=100, random_state=0)\nab.fit(df_all_train2,y_train1)", "_____no_output_____" ], [ "auc1_te = auc1_scr(\"ab\",df_all_test2,y_test1)\nauc1_tr = auc1_scr(\"ab\",df_all_train2,y_train1)", "_____no_output_____" ], [ "auc1_te,auc1_tr", "_____no_output_____" ] ], [ [ "# Grid Search", "_____no_output_____" ] ], [ [ "# This will take around 1hr+ to execute on standard colab runtime\n# AB_grid= AdaBoostClassifier(random_state=42)\n\n# params = {\n# 'n_estimators': [100,500],\n# 'learning_rate': [0.2,0.5,1],\n# 'algorithm': ['SAMME','SAMME.R'],\n# 'base_estimator' : [DecisionTreeClassifier(max_depth=1),DecisionTreeClassifier(max_depth=2),DecisionTreeClassifier(max_depth=5)]\n# }\n\n# grid_search = GridSearchCV(estimator=AB_grid, \n# 
param_grid=params, \n# cv=2, n_jobs=5, verbose=1, scoring = \"roc_auc\")\n\n\n# grid_search.fit(df_all_test2,y_test1)", "_____no_output_____" ], [ "# score_df = pd.DataFrame(grid_search.cv_results_)\n# score_df.head()\n# score_df.sort_values([\"rank_test_score\"]).head(5)", "_____no_output_____" ] ], [ [ "# Gradient Boosting", "_____no_output_____" ] ], [ [ "# GradientBoosting Classifier\n\n# It will take around 9 mins for execution\ngb = GradientBoostingClassifier(max_depth=5,n_estimators=300, learning_rate=0.5)\ngb.fit(df_all_train2,y_train1)\n", "_____no_output_____" ], [ "auc1_te = auc1_scr(\"gb\",df_all_test2,y_test1)\nauc1_tr = auc1_scr(\"gb\",df_all_train2,y_train1)", "_____no_output_____" ], [ "auc1_te,auc1_tr", "_____no_output_____" ], [ "# XGB Classifier\n\n# It will take around 4 mins for execution\nxgb = XGBClassifier()\nxgb.fit(df_all_train2,y_train1)", "_____no_output_____" ], [ "auc1_te = auc1_scr(\"xgb\",df_all_test2,y_test1)\nauc1_tr = auc1_scr(\"xgb\",df_all_train2,y_train1)", "_____no_output_____" ], [ "auc1_te,auc1_tr", "_____no_output_____" ], [ "class_weights = [0.1,0.9]\n\n# note: XGBClassifier does not define a 'class_weights' argument, so the kwarg below is\n# most likely ignored by the model; scale_pos_weight is the usual way to reweight classes\nxgb_param = XGBClassifier(n_estimators=300,max_depth= 5,class_weights = class_weights,\n subsample= 0.2,colsample_bytree= 0.3,random_state=0)\n\n\nxgb_param.fit(df_all_train2,y_train1)", "_____no_output_____" ], [ "auc1_te = auc1_scr(\"xgb_param\",df_all_test2,y_test1)\nauc1_tr = auc1_scr(\"xgb_param\",df_all_train2,y_train1)", "_____no_output_____" ], [ "auc1_te,auc1_tr", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a79a4fb6aaa4c61029aafe0feaa5461aa27cfff
359,729
ipynb
Jupyter Notebook
project_workbook.ipynb
tgeasley/ms2_project
a9dfa4d843c52363a28b6a0cce83571916aa96ff
[ "MIT" ]
null
null
null
project_workbook.ipynb
tgeasley/ms2_project
a9dfa4d843c52363a28b6a0cce83571916aa96ff
[ "MIT" ]
null
null
null
project_workbook.ipynb
tgeasley/ms2_project
a9dfa4d843c52363a28b6a0cce83571916aa96ff
[ "MIT" ]
2
2021-09-15T02:08:27.000Z
2022-02-17T15:41:25.000Z
181.406455
119,142
0.787073
[ [ [ "# Notes\n\nThis project requires the creation of an **assets** and **outputs** folder in the same directory as the notebook. The assets folder should contain the WikiLarge_Train.csv file available from [Kaggle](https://www.kaggle.com/c/umich-siads-695-predicting-text-difficulty).\n\nSeveral files here are writting to the **outputs** folder during the process due to long run times of different parts of the script.\n\n# Imports", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nimport time\nimport pyLDAvis.sklearn\nfrom pylab import bone, pcolor, colorbar, plot, show, rcParams, savefig\nfrom itertools import chain\nimport pickle\nimport spacy\nfrom collections import Iterable\nimport ast\n\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.cluster import AffinityPropagation, KMeans, DBSCAN, Birch, MiniBatchKMeans, OPTICS\nfrom sklearn.metrics import silhouette_score\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.manifold import TSNE\n\nfrom wordcloud import WordCloud\n\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)", "/tmp/ipykernel_3033/26246093.py:11: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3, and in 3.10 it will stop working\n from collections import Iterable\n" ] ], [ [ "# Import Dataset and Build Tokenset\n\nThis section uses the nlp.pipe method to build out the document set and then passes the tokenset to a seperate function that lemmatizses the words, removes stopwords, and removes punctation.", "_____no_output_____" ] ], [ [ "def lemmatizer_alt(input):\n lemma_list = [token.lemma_ for token in input if token.is_stop == False and token.is_punct==False]\n\n return lemma_list\n\ndef build_dataset():\n wiki = pd.read_csv('assets/WikiLarge_Train.csv')\n\n l_start = time.time()\n nlp = spacy.load(\"en_core_web_sm\", exclude=['parser', \"ner\"])\n wiki['nlp_text'] = [doc for doc in nlp.pipe(wiki[\"original_text\"].tolist())]\n wiki['tokenized_text'] = wiki[\"nlp_text\"].apply(lemmatizer_alt)\n l_duration = time.time() - l_start\n print('Pipe Model Timing: {:.2f} seconds'.format(l_duration), flush=True)\n\n wiki = wiki[[\n 'original_text',\n 'label',\n 'tokenized_text'\n ]]\n\n wiki.to_csv('outputs/wiki_tokenized.csv')\n \n return 0", "_____no_output_____" ], [ "build_dataset()", "Pipe Model Timing: 313.40 seconds\n" ] ], [ [ "# Build LDA\n\nThe LDA build proceeds through the following steps:\n1. Vectorization of the tokenset built in the previous step.\n2. Setup of GridSearch parameters.\n3. Search of the LDA model through the parameters.\n4. 
Pickling of the final best model, vectorizer, grid search results, and others.\n\nThe GridSearch will default to log-likelihood, which should be sufficient here.", "_____no_output_____" ] ], [ [ "def lda_build():\n\n    l_start = time.time()\n    wiki = pd.read_csv('outputs/wiki_tokenized.csv')\n    wiki['token_list'] = wiki['tokenized_text'].apply(ast.literal_eval)\n    input_list = wiki['token_list'].str.join(\" \")\n    l_duration = time.time() - l_start\n    print('List Construction: {:.2f} seconds'.format(l_duration), flush=True)\n\n    l_start = time.time()\n    vectorizer = TfidfVectorizer(\n        analyzer='word',\n        min_df=10,\n        token_pattern='[a-zA-Z0-9]{4,}'  # Ensure every token is at least 4 chars long\n    )\n    data_vectorized = vectorizer.fit_transform(input_list)\n    l_duration = time.time() - l_start\n    print(\"Number of topics: {:.0f}\".format(data_vectorized.shape[1]))\n    print('Vector Construction: {:.2f} seconds'.format(l_duration), flush=True)\n\n    search_params = {\n        'n_components': [10, 15, 20],\n        'learning_decay': [.5, .7, .9]\n    }\n\n    l_start = time.time()\n    lda = LatentDirichletAllocation(\n        max_iter=5,\n        learning_method='online',\n        learning_offset=50.,\n        random_state=42,\n        verbose=1\n    )\n\n    model = GridSearchCV(\n        lda,\n        param_grid=search_params,\n        verbose=1,\n        n_jobs=1\n    )\n\n    model.fit(data_vectorized)\n    l_duration = time.time() - l_start\n    print('LDA Grid Search: {:.2f} seconds'.format(l_duration), flush=True)\n\n    l_start = time.time()\n    best_lda_model = model.best_estimator_\n    data_lda = best_lda_model.transform(data_vectorized)\n    search_results = model.cv_results_\n    l_duration = time.time() - l_start\n    print('Grid Search Data Extraction: {:.2f} seconds'.format(l_duration), flush=True)\n\n    pickle.dump(vectorizer, open(\"outputs/vectorizer.pkl\", \"wb\"))\n    pickle.dump(data_vectorized, open(\"outputs/data_vectorized.pkl\", \"wb\"))\n    pickle.dump(data_lda, open(\"outputs/data_lda.pkl\", \"wb\"))\n    pickle.dump(best_lda_model, open(\"outputs/lda_model.pkl\", 'wb'))\n    pickle.dump(search_results, open(\"outputs/grid_search_results.pkl\", 'wb'))\n\n    return 0", "_____no_output_____" ], [ "lda_build()", "List Construction: 12.25 seconds\nNumber of topics: 26079\nVector Construction: 3.96 seconds\nFitting 5 folds for each of 9 candidates, totalling 45 fits\niteration: 1 of max_iter: 5\niteration: 2 of max_iter: 5\niteration: 3 of max_iter: 5\niteration: 4 of max_iter: 5\niteration: 5 of max_iter: 5\niteration: 1 of max_iter: 5\niteration: 2 of max_iter: 5\niteration: 3 of max_iter: 5\niteration: 4 of 
max_iter: 5\niteration: 5 of max_iter: 5\n[... output truncated: the same five iteration messages repeat for each remaining grid-search fit ...]\nLDA Grid Search: 16507.65 seconds\nGrid Search Data Extraction: 27.97 seconds\n" ] ], [ [ "# Grid Search Review\nThis section plots the log-likelihood across models from grid_search_results.pkl.", "_____no_output_____" ] ], [ [ "grid_data = pickle.load(open(\"outputs/grid_search_results.pkl\", \"rb\"))\nsearch_params = {\n    'n_components': [10, 15, 20],\n    'learning_decay': [.5, .7, .9]\n}\ndef plot_grid_search(cv_results, grid_param_1, grid_param_2, name_param_1, name_param_2):\n    scores_mean = cv_results['mean_test_score']\n    scores_mean = np.array(scores_mean).reshape(len(grid_param_2),len(grid_param_1))\n\n    scores_sd = cv_results['std_test_score']\n    scores_sd = np.array(scores_sd).reshape(len(grid_param_2),len(grid_param_1))\n\n    _, ax = plt.subplots(1,1)\n\n    for idx, val in enumerate(grid_param_2):\n        ax.plot(grid_param_1, scores_mean[idx,:], '-o', label=name_param_2 + ': ' + str(val))\n\n    ax.set_xlabel(name_param_1)\n    ax.set_ylabel('Log Likelihood')\n    ax.legend(loc=\"best\")\n    ax.grid('on')\n\nplot_grid_search(grid_data, search_params['n_components'], search_params['learning_decay'], 'N Components', 'Learning Decay')", "_____no_output_____" ] ], [ [ "# LDA Plotting\nThis section creates an interactive plot of the LDA model for examination of the topics extracted from the document.", "_____no_output_____" ] ], [ [ "def LDA_plot():\n    lda_model = pickle.load(open(\"outputs/lda_model.pkl\", \"rb\"))\n    data_vectorized = pickle.load(open(\"outputs/data_vectorized.pkl\", \"rb\"))\n    vectorizer = pickle.load(open(\"outputs/vectorizer.pkl\", \"rb\"))\n    pyLDAvis.enable_notebook()\n    dash = pyLDAvis.sklearn.prepare(lda_model, data_vectorized, vectorizer, mds='tsne')\n    return 
dash", "_____no_output_____" ], [ "LDA_plot()", "/home/tgeasley/anaconda3/lib/python3.8/site-packages/pyLDAvis/_prepare.py:246: FutureWarning: In a future version of pandas all arguments of DataFrame.drop except for the argument 'labels' will be keyword-only\n default_term_info = default_term_info.sort_values(\n/home/tgeasley/anaconda3/lib/python3.8/site-packages/sklearn/manifold/_t_sne.py:691: FutureWarning: 'square_distances' has been introduced in 0.24 to help phase out legacy squaring behavior. The 'legacy' setting will be removed in 1.1 (renaming of 0.26), and the default setting will be changed to True. In 1.3, 'square_distances' will be removed altogether, and distances will be squared by default. Set 'square_distances'=True to silence this warning.\n warnings.warn(\n" ] ], [ [ "# Tag Application\n\nNext the LDA model is used to generate the top n tags for each article. Will only function for articles with at least n tags because otherwise vectorization and clustering won't work as vectors will have different sizes", "_____no_output_____" ] ], [ [ "def tag_df(n_items):\n wiki = pd.read_csv('outputs/wiki_tokenized.csv')\n lda_model = pickle.load(open(\"outputs/lda_model.pkl\", \"rb\"))\n data_vectorized = pickle.load(open(\"outputs/data_vectorized.pkl\", \"rb\"))\n vectorizer = pickle.load(open(\"outputs/vectorizer.pkl\", \"rb\"))\n \n threshold = 0 #Arbitrary, might change\n n_topics = 10 # Change to best fit\n \n\n list_scores = []\n list_words = []\n\n feature_names = np.array(vectorizer.get_feature_names())\n lda_components = lda_model.components_ #/ lda_model.components_.sum(axis=1)[:, np.newaxis] # normalization\n\n total_length = len(wiki)\n return_df = {\n 'index': [],\n 'tag': []\n }\n \n for index, row in wiki.iterrows():\n\n if index % 1000 == 0 and index > 0:\n print('Percent complete: {:.2%}'.format(index/total_length))\n\n text_projection = data_vectorized[index,:].toarray()\n \n element_score = np.multiply(text_projection[0],lda_components)\n non_zero = np.nonzero(element_score)\n \n l_words = {}\n for i,j in zip(non_zero[0],non_zero[1]):\n if feature_names[j] in l_words:\n l_words[feature_names[j] ] += element_score[i,j]\n else:\n l_words[feature_names[j] ] = element_score[i,j]\n l_words = [k for k, v in sorted(l_words.items(), key=lambda item: item[1], reverse=True)]\n if len(l_words) >= n_items:\n l_words = l_words[:n_items]\n \n \n return_df['index'].append(index)\n return_df['tag'].append(\" \".join(list(l_words)))\n \n return_df = pd.DataFrame(return_df).set_index('index')\n wiki = wiki.join(return_df)\n return wiki", "_____no_output_____" ], [ "tagged_df = tag_df(5)\ntagged_df.to_csv('outputs/tagged_df.csv')\ntagged_df.head(5)", "Percent complete: 0.24%\nPercent complete: 0.48%\nPercent complete: 0.72%\nPercent complete: 0.96%\nPercent complete: 1.20%\nPercent complete: 1.44%\nPercent complete: 1.68%\nPercent complete: 1.92%\nPercent complete: 2.16%\nPercent complete: 2.40%\nPercent complete: 2.64%\nPercent complete: 2.88%\nPercent complete: 3.12%\nPercent complete: 3.36%\nPercent complete: 3.60%\nPercent complete: 3.84%\nPercent complete: 4.08%\nPercent complete: 4.32%\nPercent complete: 4.56%\nPercent complete: 4.80%\nPercent complete: 5.04%\nPercent complete: 5.28%\nPercent complete: 5.52%\nPercent complete: 5.76%\nPercent complete: 6.00%\nPercent complete: 6.24%\nPercent complete: 6.48%\nPercent complete: 6.72%\nPercent complete: 6.96%\nPercent complete: 7.20%\nPercent complete: 7.44%\nPercent complete: 7.68%\nPercent complete: 7.92%\nPercent complete: 
1.44%\n[... progress messages continue in steps of roughly 0.24% ...]\nPercent 
complete: 73.90%\nPercent complete: 74.14%\nPercent complete: 74.38%\nPercent complete: 74.62%\nPercent complete: 74.86%\nPercent complete: 75.10%\nPercent complete: 75.34%\nPercent complete: 75.58%\nPercent complete: 75.82%\nPercent complete: 76.06%\nPercent complete: 76.30%\nPercent complete: 76.54%\nPercent complete: 76.78%\nPercent complete: 77.02%\nPercent complete: 77.26%\nPercent complete: 77.50%\nPercent complete: 77.74%\nPercent complete: 77.98%\nPercent complete: 78.22%\nPercent complete: 78.46%\nPercent complete: 78.70%\nPercent complete: 78.94%\nPercent complete: 79.18%\n" ] ], [ [ "# Tag Grouping\n\nThis next section applies a number of clustering methods to the tags generated in the previous section. In order to reduce the search space the tags are vectorized and then transformed according to the LDA number of topics.", "_____no_output_____" ] ], [ [ "tagged_df = pd.read_csv('outputs/tagged_df.csv')\ntagged_df.head(5)", "_____no_output_____" ], [ "def other_tags(input_df, tag_label, return_scores = None):\n \n method = []\n method_scores = []\n\n l_start = time.time()\n lda_model = pickle.load(open(\"outputs/lda_model.pkl\", \"rb\"))\n vectorizer = pickle.load(open(\"outputs/vectorizer.pkl\", \"rb\"))\n input_df = input_df[input_df[tag_label].str.len()>0]\n X_i = vectorizer.transform(input_df[tag_label])\n print(X_i.shape)\n print(lda_model.components_.shape)\n X = X_i @ lda_model.components_.T\n print(X.shape)\n l_duration = time.time() - l_start\n print('Vectorization: {:.2f} seconds'.format(l_duration), flush=True)\n \n \n l_start = time.time()\n lda_cluster = np.argmax(X, axis=1)\n l_duration = time.time() - l_start\n print('LDA clustering: {:.2f} seconds'.format(l_duration), flush=True)\n print(lda_cluster.shape)\n lda_ss = silhouette_score(X, lda_cluster, sample_size=10000) #Depending on speed, may need to change sample size\n print('Lda score: {:.4f}'.format(lda_ss), flush=True)\n method.append('LDA')\n method_scores.append(lda_ss)\n \n #DBSCAN\n l_start = time.time()\n db_clustering = DBSCAN(eps=3, min_samples=2).fit(X)\n l_duration = time.time() - l_start\n print('DBSCAN: {:.2f} seconds'.format(l_duration), flush=True)\n \n num_clusters = len(list(set(db_clustering.labels_)))\n db_ss = silhouette_score(X, db_clustering.labels_, sample_size=10000) #Depending on speed, may need to change sample size\n \n \n print('DBSCAN score: {:.4f}'.format(db_ss), flush=True)\n print(\"DBSCAN number of clusters: \" +str(num_clusters))\n method.append('DBSCAN')\n method_scores.append(db_ss)\n \n index_range = np.arange(0,X.shape[0],1000)\n \n #BIRCH\n l_start = time.time()\n brc = Birch(n_clusters=None)\n for index,val in enumerate(index_range):\n #print('Birch Current index: '+str(index))\n #l_start = time.time()\n if index+1 >= len(index_range):\n brc = brc.partial_fit(X[val:X.shape[0],:])\n else:\n brc = brc.partial_fit(X[val:index_range[index+1],:])\n #l_duration = time.time() - l_start\n \n l_duration = time.time() - l_start\n print('BIRCH fit: {:.2f} seconds'.format(l_duration), flush=True)\n\n l_start = time.time()\n brc_labels = brc.predict(X)\n num_clusters = len(list(set(brc_labels)))\n l_duration = time.time() - l_start\n print('BIRCH predict: {:.2f} seconds'.format(l_duration), flush=True)\n print(\"Birch number of clusters: \" +str(num_clusters))\n \n birch_ss = silhouette_score(X, brc_labels, sample_size=10000) #Depending on speed, may need to change sample size\n print('Birch score: {:.4f}'.format(birch_ss), flush=True)\n \n method.append('BIRCH')\n 
method_scores.append(birch_ss)\n\n    if return_scores is None:\n        return_scores = pd.DataFrame({\n            'method': method,\n            'method_scores': method_scores,\n        })\n    else:\n        l_return_scores = pd.DataFrame({\n            'method': method,\n            'method_scores': method_scores,\n        })\n        return_scores = return_scores.append(l_return_scores, ignore_index=True)  # assign the result; DataFrame.append is not in place\n\n    return return_scores\n\ndef k_means_tags(input_df, tag_label, return_scores = None):\n\n    method = []\n    method_scores = []\n\n    interial_clusters = []\n    intertia = []\n\n    l_start = time.time()\n    lda_model = pickle.load(open(\"outputs/lda_model.pkl\", \"rb\"))\n    vectorizer = pickle.load(open(\"outputs/vectorizer.pkl\", \"rb\"))\n    input_df = input_df[input_df[tag_label].str.len()>0]\n    X_i = vectorizer.transform(input_df[tag_label])\n    print(X_i.shape)\n    print(lda_model.components_.shape)\n    X = X_i @ lda_model.components_.T\n    print(X.shape)\n    l_duration = time.time() - l_start\n    print('Vectorization: {:.2f} seconds'.format(l_duration), flush=True)\n\n    index_range = np.arange(0,X.shape[0],1000)\n    cluster_range = [10,30,50,70,100,300,400,500,600,700,800,900,1000]\n\n    for el in cluster_range:\n        kmeans = MiniBatchKMeans(n_clusters=el,random_state=0,batch_size=6)\n        l_start = time.time()\n        for index,val in enumerate(index_range):\n            if index+1 >= len(index_range):\n                kmeans = kmeans.partial_fit(X[val:X.shape[0],:])\n            else:\n                kmeans = kmeans.partial_fit(X[val:index_range[index+1],:])\n        l_duration = time.time() - l_start\n        print('Kmeans fit: {:.2f} seconds'.format(l_duration), flush=True)\n\n        l_start = time.time()\n        kmeans_labels = kmeans.predict(X)\n        l_duration = time.time() - l_start\n        print('Kmeans predict: {:.2f} seconds'.format(l_duration), flush=True)\n\n        kmeans_ss = silhouette_score(X, kmeans_labels, sample_size=10000)  # Depending on speed, may need to change sample size\n        print('Kmeans score: {:.4f}'.format(kmeans_ss), flush=True)\n        print(\"Number of clusters: \" + str(el))\n\n        method.append('K Means ' + str(el) + ' clusters')\n        method_scores.append(kmeans_ss)\n\n        interial_clusters.append(el)\n        intertia.append(kmeans.inertia_)\n\n    inertia_scores = pd.DataFrame({\n        'interia_clusters': interial_clusters,\n        'interia_score': intertia\n    })\n    if return_scores is None:\n        return_scores = pd.DataFrame({\n            'method': method,\n            'method_scores': method_scores,\n        })\n    else:\n        l_return_scores = pd.DataFrame({\n            'method': method,\n            'method_scores': method_scores,\n        })\n        return_scores = return_scores.append(l_return_scores, ignore_index=True)\n\n    return pd.DataFrame(inertia_scores), return_scores", "_____no_output_____" ], [ "oth_scores = other_tags(tagged_df, 'tags')", "(412305, 26079)\n(10, 26079)\n(412305, 10)\nVectorization: 2.21 seconds\nLDA clustering: 0.01 seconds\n(412305,)\nLda score: -0.0565\nDBSCAN: 11.41 seconds\nDBSCAN score: -0.3433\nDBSCAN number of clusters: 65135\nBIRCH fit: 714.97 seconds\nBIRCH predict: 1186.76 seconds\nBirch number of clusters: 279271\nBirch score: 0.0870\n" ], [ "inertia_dict, score_dict = k_means_tags(tagged_df, 'tags', oth_scores)", "(412305, 26079)\n(10, 26079)\n(412305, 10)\nVectorization: 2.34 seconds\nKmeans fit: 0.55 seconds\nKmeans predict: 7.96 seconds\nKmeans score: 0.2398\nNumber of clusters: 10\nKmeans fit: 0.92 seconds\nKmeans predict: 10.19 seconds\nKmeans score: 0.2234\nNumber of clusters: 30\nKmeans fit: 1.25 seconds\nKmeans predict: 9.42 seconds\nKmeans score: 0.1823\nNumber of clusters: 50\nKmeans fit: 1.52 seconds\nKmeans predict: 9.67 seconds\nKmeans score: 0.1779\nNumber of clusters: 70\nKmeans fit: 1.98 seconds\nKmeans predict: 9.74 
seconds\nKmeans score: 0.1545\nNumber of clusters: 100\nKmeans fit: 4.94 seconds\nKmeans predict: 10.09 seconds\nKmeans score: 0.1374\nNumber of clusters: 300\nKmeans fit: 6.12 seconds\nKmeans predict: 12.31 seconds\nKmeans score: 0.1434\nNumber of clusters: 400\nKmeans fit: 7.42 seconds\nKmeans predict: 11.83 seconds\nKmeans score: 0.1432\nNumber of clusters: 500\nKmeans fit: 8.27 seconds\nKmeans predict: 11.16 seconds\nKmeans score: 0.1344\nNumber of clusters: 600\nKmeans fit: 9.30 seconds\nKmeans predict: 11.31 seconds\nKmeans score: 0.1247\nNumber of clusters: 700\nKmeans fit: 10.39 seconds\nKmeans predict: 11.95 seconds\nKmeans score: 0.1260\nNumber of clusters: 800\nKmeans fit: 11.56 seconds\nKmeans predict: 11.93 seconds\nKmeans score: 0.1193\nNumber of clusters: 900\nKmeans fit: 11.82 seconds\nKmeans predict: 12.00 seconds\nKmeans score: 0.1177\nNumber of clusters: 1000\n" ], [ "score_dict.head()", "_____no_output_____" ] ], [ [ "# Clustering Evaluation\nThe next section evaluates the results of the clustering methods applied previously and then selects the best model for use in the final section of building a word cloud.", "_____no_output_____" ] ], [ [ "method = score_dict['method'].to_list()\nmethod_scores = score_dict['method_scores'].to_list()\n\nplt.bar(method, method_scores)\nplt.xlabel(\"Clustering Method\")\nplt.xticks(rotation=90)\nplt.ylabel(\"Silhouette Score\")\nplt.show()", "_____no_output_____" ], [ "#Plot kmeans inertia\nrange_n_clusters = inertia_dict['interia_clusters'].to_list()\navg_distance = inertia_dict['interia_score'].to_list()\n\nplt.plot(range_n_clusters, avg_distance)\nplt.xlabel(\"Number of Clusters (k)\")\nplt.ylabel(\"Distance\")\nplt.show()", "_____no_output_____" ], [ "def tag_best_k_means(input_df, tag_label, num_clusters):\n \n l_start = time.time()\n lda_model = pickle.load(open(\"outputs/lda_model.pkl\", \"rb\"))\n vectorizer = pickle.load(open(\"outputs/vectorizer.pkl\", \"rb\"))\n input_df = input_df[input_df[tag_label].str.len()>0]\n X_i = vectorizer.transform(input_df[tag_label])\n print(X_i.shape)\n print(lda_model.components_.shape)\n X = X_i @ lda_model.components_.T\n print(X.shape)\n l_duration = time.time() - l_start\n print('Vectorization: {:.2f} seconds'.format(l_duration), flush=True)\n \n index_range = np.arange(0,X.shape[0],1000)\n\n kmeans = MiniBatchKMeans(n_clusters=num_clusters,random_state=0,batch_size=6)\n l_start = time.time()\n for index,val in enumerate(index_range):\n if index+1 >= len(index_range):\n kmeans = kmeans.partial_fit(X[val:X.shape[0],:])\n else:\n kmeans = kmeans.partial_fit(X[val:index_range[index+1],:])\n l_duration = time.time() - l_start\n print('Kmeans fit: {:.2f} seconds'.format(l_duration), flush=True)\n\n\n l_start = time.time()\n kmeans_labels = kmeans.predict(X)\n l_duration = time.time() - l_start\n print('Kmeans predict: {:.2f} seconds'.format(l_duration), flush=True)\n \n input_df['cluster'] = kmeans_labels\n\n \n \n return input_df", "_____no_output_____" ], [ "tag_df = tag_best_k_means(tagged_df, 'tags', 400)", "(412305, 26079)\n(10, 26079)\n(412305, 10)\nVectorization: 2.43 seconds\nKmeans fit: 6.13 seconds\nKmeans predict: 10.59 seconds\n" ] ], [ [ "# Word Clouds\nThis next section generates word clouds for a particular cluster.", "_____no_output_____" ] ], [ [ "def gen_word_cloud(df, tag_col, cluster, cluster_col):\n # Read the whole text.\n text = df[df[cluster_col]==cluster][tag_col].str.cat(sep=' ')\n\n # Generate a word cloud image\n wordcloud = WordCloud().generate(text)\n\n # Display 
the generated image:\n # the matplotlib way:\n \n #plt.imshow(wordcloud, interpolation='bilinear')\n #plt.axis(\"off\")\n\n # lower max_font_size\n wordcloud = WordCloud(background_color=\"white\", repeat=True).generate(text)\n plt.figure()\n plt.imshow(wordcloud, interpolation=\"bilinear\")\n plt.axis(\"off\")\n #plt.show()\n \n return plt", "_____no_output_____" ], [ "gen_word_cloud(tag_df, 'tags',3,'cluster').show()", "_____no_output_____" ], [ "tag_df.to_csv('outputs/tagged_df.csv')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
4a79b28ca6926791e2e3f093236aab5d7774387a
79,445
ipynb
Jupyter Notebook
docs_src/vision.learner.ipynb
tolitius/fastai
bc093a9de9c9d6c3d2c7e5ad6d7a427face1ca9a
[ "Apache-2.0" ]
3
2019-07-26T11:25:59.000Z
2021-12-20T04:13:18.000Z
docs_src/vision.learner.ipynb
tolitius/fastai
bc093a9de9c9d6c3d2c7e5ad6d7a427face1ca9a
[ "Apache-2.0" ]
null
null
null
docs_src/vision.learner.ipynb
tolitius/fastai
bc093a9de9c9d6c3d2c7e5ad6d7a427face1ca9a
[ "Apache-2.0" ]
1
2021-06-09T21:21:15.000Z
2021-06-09T21:21:15.000Z
84.696162
21,504
0.77655
[ [ [ "## Computer Vision Learner", "_____no_output_____" ], [ "[`vision.learner`](/vision.learner.html#vision.learner) is the module that defines the [`cnn_learner`](/vision.learner.html#cnn_learner) method, to easily get a model suitable for transfer learning.", "_____no_output_____" ] ], [ [ "from fastai.gen_doc.nbdoc import *\nfrom fastai.vision import *\n", "_____no_output_____" ] ], [ [ "## Transfer learning", "_____no_output_____" ], [ "Transfer learning is a technique where you use a model trained on a very large dataset (usually [ImageNet](http://image-net.org/) in computer vision) and then adapt it to your own dataset. The idea is that it has learned to recognize many features on all of this data, and that you will benefit from this knowledge, especially if your dataset is small, compared to starting from a randomly initialized model. It has been proved in [this article](https://arxiv.org/abs/1805.08974) on a wide range of tasks that transfer learning nearly always give better results.\n\nIn practice, you need to change the last part of your model to be adapted to your own number of classes. Most convolutional models end with a few linear layers (a part will call head). The last convolutional layer will have analyzed features in the image that went through the model, and the job of the head is to convert those in predictions for each of our classes. In transfer learning we will keep all the convolutional layers (called the body or the backbone of the model) with their weights pretrained on ImageNet but will define a new head initialized randomly.\n\nThen we will train the model we obtain in two phases: first we freeze the body weights and only train the head (to convert those analyzed features into predictions for our own data), then we unfreeze the layers of the backbone (gradually if necessary) and fine-tune the whole model (possibly using differential learning rates).\n\nThe [`cnn_learner`](/vision.learner.html#cnn_learner) factory method helps you to automatically get a pretrained model from a given architecture with a custom head that is suitable for your data.", "_____no_output_____" ] ], [ [ "show_doc(cnn_learner)", "_____no_output_____" ] ], [ [ "This method creates a [`Learner`](/basic_train.html#Learner) object from the [`data`](/vision.data.html#vision.data) object and model inferred from it with the backbone given in `arch`. Specifically, it will cut the model defined by `arch` (randomly initialized if `pretrained` is False) at the last convolutional layer by default (or as defined in `cut`, see below) and add:\n- an [`AdaptiveConcatPool2d`](/layers.html#AdaptiveConcatPool2d) layer,\n- a [`Flatten`](/layers.html#Flatten) layer,\n- blocks of \\[[`nn.BatchNorm1d`](https://pytorch.org/docs/stable/nn.html#torch.nn.BatchNorm1d), [`nn.Dropout`](https://pytorch.org/docs/stable/nn.html#torch.nn.Dropout), [`nn.Linear`](https://pytorch.org/docs/stable/nn.html#torch.nn.Linear), [`nn.ReLU`](https://pytorch.org/docs/stable/nn.html#torch.nn.ReLU)\\] layers.\n\nThe blocks are defined by the `lin_ftrs` and `ps` arguments. Specifically, the first block will have a number of inputs inferred from the backbone `arch` and the last one will have a number of outputs equal to `data.c` (which contains the number of classes of the data) and the intermediate blocks have a number of inputs/outputs determined by `lin_frts` (of course a block has a number of inputs equal to the number of outputs of the previous block). 
The default is to have an intermediate hidden size of 512 (which makes two blocks `model_activation` -> 512 -> `n_classes`). If you pass a float then the final dropout layer will have the value `ps`, and the remaining will be `ps/2`. If you pass a list then the values are used for the dropout probabilities directly.\n\nNote that the very last block doesn't have a [`nn.ReLU`](https://pytorch.org/docs/stable/nn.html#torch.nn.ReLU) activation, to allow you to use any final activation you want (generally included in the loss function in pytorch). Also, the backbone will be frozen if you choose `pretrained=True` (so only the head will train if you call [`fit`](/basic_train.html#fit)) so that you can immediately start phase one of training as described above.\n\nAlternatively, you can define your own `custom_head` to put on top of the backbone. If you want to specify where to split `arch`, you should do so in the argument `cut`, which can either be the index of a specific layer (the result will not include that layer) or a function that, when passed the model, will return the backbone you want.\n\nThe final model obtained by stacking the backbone and the head (custom or defined as we saw) is then separated into groups for gradual unfreezing or differential learning rates. You can specify how to split the backbone into groups with the optional argument `split_on` (it should be a function that returns those groups when given the backbone). \n\nThe `kwargs` will be passed on to [`Learner`](/basic_train.html#Learner), so you can put here anything that [`Learner`](/basic_train.html#Learner) will accept ([`metrics`](/metrics.html#metrics), `loss_func`, `opt_func`...)", "_____no_output_____" ] ], [ [ "path = untar_data(URLs.MNIST_SAMPLE)\ndata = ImageDataBunch.from_folder(path)", "_____no_output_____" ], [ "learner = cnn_learner(data, models.resnet18, metrics=[accuracy])\nlearner.fit_one_cycle(1,1e-3)", "_____no_output_____" ], [ "learner.save('one_epoch')", "_____no_output_____" ], [ "show_doc(unet_learner)", "_____no_output_____" ] ], [ [ "This time the model will be a [`DynamicUnet`](/vision.models.unet.html#DynamicUnet) with an encoder based on `arch` (maybe `pretrained`) that is cut depending on `split_on`. `blur_final`, `norm_type`, `blur`, `self_attention`, `y_range`, `last_cross` and `bottle` are passed to the unet constructor, and the `kwargs` are passed to the initialization of the [`Learner`](/basic_train.html#Learner).", "_____no_output_____" ] ], [ [ "jekyll_warn(\"The models created with this function won't work with pytorch `nn.DataParallel`, you have to use distributed training instead!\")", "_____no_output_____" ] ], [ [ "### Get predictions", "_____no_output_____" ], [ "Once you've actually trained your model, you may want to use it on a single image. This is done by using the following method.", "_____no_output_____" ] ], [ [ "show_doc(Learner.predict)", "_____no_output_____" ], [ "img = learner.data.train_ds[0][0]\nlearner.predict(img)", "_____no_output_____" ] ], [ [ "Here the predicted class for our image is '3', which corresponds to a label of 0. 
The probabilities the model found for each class are 99.65% and 0.35% respectively, so its confidence is pretty high.\n\nNote that if you want to load your trained model and use it in inference mode with the previous function, you should export your [`Learner`](/basic_train.html#Learner).", "_____no_output_____" ] ], [ [ "learner.export()", "_____no_output_____" ] ], [ [ "And then you can load it with an empty data object that has the same internal state, like this:", "_____no_output_____" ] ], [ [ "learn = load_learner(path)", "_____no_output_____" ] ], [ [ "### Customize your model", "_____no_output_____" ], [ "You can customize [`cnn_learner`](/vision.learner.html#cnn_learner) for your own model's default `cut` and `split_on` functions by adding them to the dictionary `model_meta`. The key should be your model and the value should be a dictionary with the keys `cut` and `split_on` (see the source code for examples). The constructor will call [`create_body`](/vision.learner.html#create_body) and [`create_head`](/vision.learner.html#create_head) for you based on `cut`; you can also call them yourself, which is particularly useful for testing.", "_____no_output_____" ] ], [ [ "show_doc(create_body)", "_____no_output_____" ], [ "show_doc(create_head, doc_string=False)", "_____no_output_____" ] ], [ [ "Model head that takes `nf` features, runs through `lin_ftrs`, and ends with `nc` classes. `ps` is the probability of the dropouts, as documented above in [`cnn_learner`](/vision.learner.html#cnn_learner).", "_____no_output_____" ] ], [ [ "show_doc(ClassificationInterpretation, title_level=3)", "_____no_output_____" ] ], [ [ "This provides a confusion matrix and visualization of the most incorrect images. Pass in your [`data`](/vision.data.html#vision.data), calculated `preds`, actual `y`, and your `losses`, and then use the methods below to view the model interpretation results. For instance:", "_____no_output_____" ] ], [ [ "learn = cnn_learner(data, models.resnet18)\nlearn.fit(1)\npreds,y,losses = learn.get_preds(with_loss=True)\ninterp = ClassificationInterpretation(learn, preds, y, losses)", "_____no_output_____" ] ], [ [ "The following factory method gives a more convenient way to create an instance of this class:", "_____no_output_____" ] ], [ [ "show_doc(ClassificationInterpretation.from_learner, full_name='from_learner')", "_____no_output_____" ] ], [ [ "You can also use the shortcut `learn.interpret()` to do the same.", "_____no_output_____" ] ], [ [ "show_doc(Learner.interpret, full_name='interpret')", "_____no_output_____" ] ], [ [ "Note that this shortcut is a [`Learner`](/basic_train.html#Learner) object/class method that can be called as: `learn.interpret()`.", "_____no_output_____" ] ], [ [ "show_doc(ClassificationInterpretation.plot_top_losses, full_name='plot_top_losses')", "_____no_output_____" ] ], [ [ "The `k` items are arranged as a square, so it will look best if `k` is a square number (4, 9, 16, etc.). The title of each image shows: prediction, actual, loss, probability of actual class. When `heatmap` is True (by default it's True), Grad-CAM heatmaps (http://openaccess.thecvf.com/content_ICCV_2017/papers/Selvaraju_Grad-CAM_Visual_Explanations_ICCV_2017_paper.pdf) are overlaid on each image. `plot_top_losses` should be used with single-labeled datasets. See `plot_multi_top_losses` below for a version capable of handling multi-labeled datasets.", "_____no_output_____" ] ], [ [ "interp.plot_top_losses(9, figsize=(7,7))", "_____no_output_____" ], [ "show_doc(ClassificationInterpretation.top_losses)", "_____no_output_____" ] ], [ [ "Returns a tuple of *(losses, indices)*.", "_____no_output_____" ] ], [ [ "interp.top_losses(9)", "_____no_output_____" ], [ "show_doc(ClassificationInterpretation.plot_multi_top_losses, full_name='plot_multi_top_losses')", "_____no_output_____" ] ], [ [ "Similar to `plot_top_losses()` but aimed at multi-labeled datasets. It plots misclassified samples sorted by their respective loss. \nSince you can have multiple labels for a single sample, they can easily overlap in a grid plot, so it plots just one sample per row. \nNote that you can pass `save_misclassified=True` (by default it's `False`). In that case, the method will return a list containing the misclassified images, which you can use to debug your model and/or tune its hyperparameters. ", "_____no_output_____" ] ], [ [ "show_doc(ClassificationInterpretation.plot_confusion_matrix)", "_____no_output_____" ] ], [ [ "If [`normalize`](/vision.data.html#normalize), plots the percentages with `norm_dec` digits. `slice_size` can be used to avoid an out-of-memory error if your set is too big. `kwargs` are passed to `plt.figure`.", "_____no_output_____" ] ], [ [ "interp.plot_confusion_matrix()", "_____no_output_____" ], [ "show_doc(ClassificationInterpretation.confusion_matrix)", "_____no_output_____" ], [ "interp.confusion_matrix()", "_____no_output_____" ], [ "show_doc(ClassificationInterpretation.most_confused)", "_____no_output_____" ] ], [ [ "#### Working with large datasets", "_____no_output_____" ], [ "When working with large datasets, memory problems can arise when computing the confusion matrix. For example, an error can look like this:\n\n    RuntimeError: $ Torch: not enough memory: you tried to allocate 64GB. Buy new RAM!\n\nIn this case it is possible to force [`ClassificationInterpretation`](/train.html#ClassificationInterpretation) to compute the confusion matrix for data slices and then aggregate the result by specifying the `slice_size` parameter. ", "_____no_output_____" ] ], [ [ "interp.confusion_matrix(slice_size=10)", "_____no_output_____" ], [ "interp.plot_confusion_matrix(slice_size=10)", "_____no_output_____" ], [ "interp.most_confused(slice_size=10)", "_____no_output_____" ] ], [ [ "## Undocumented Methods - Methods moved below this line will intentionally be hidden", "_____no_output_____" ], [ "## New Methods - Please document or move to the undocumented section", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ] ]
4a79c8736fe681df8924bc0079a380fb8539e5a2
287,751
ipynb
Jupyter Notebook
Documents/Fall/Big Data/Movie_Recommender.ipynb
jitender18/MyRepo
715b6eacdcaf72301dcf02dd3e437ce057ca3d27
[ "Apache-2.0" ]
null
null
null
Documents/Fall/Big Data/Movie_Recommender.ipynb
jitender18/MyRepo
715b6eacdcaf72301dcf02dd3e437ce057ca3d27
[ "Apache-2.0" ]
null
null
null
Documents/Fall/Big Data/Movie_Recommender.ipynb
jitender18/MyRepo
715b6eacdcaf72301dcf02dd3e437ce057ca3d27
[ "Apache-2.0" ]
null
null
null
33.95292
214
0.340892
[ [ [ "import numpy as np\nimport pandas as pd\nimport pyspark\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.sql import SQLContext\n\nfrom pyspark.sql.types import StringType\n\nsqlContext = SQLContext(sc)\nconf = SparkConf().setAppName(\"My App\").setMaster(\"local[*]\")\nsc.stop()\nsc = SparkContext(conf = conf)\n", "_____no_output_____" ] ], [ [ "## Read both the csv files", "_____no_output_____" ] ], [ [ "movies = sc.textFile('/Users/jitu/Documents/Fall/Big Data/Assignments/ml-latest-small/movies.csv')\nratings = sc.textFile('/Users/jitu/Documents/Fall/Big Data/Assignments/ml-latest-small/ratings.csv')", "_____no_output_____" ], [ "##### ------------- user input ----------------\nuserID = '1'\nmovieID = '31'\navgrating = '3.0'", "_____no_output_____" ] ], [ [ "## split input using , as a delimiter and exclude columns like genre and timestamp as I am not using those for recommendation", "_____no_output_____" ] ], [ [ "movie_rdd = movies.map(lambda line: line.split(',')[:-1]) ## Excluding Genre column\nratings_rdd = ratings.map(lambda line: line.split(',')[:-1]) ## Excluding Timestamp column\nmovie_rdd.collect()\n", "_____no_output_____" ] ], [ [ "## Filtering out header from movie RDD and selecting movie ID and movie name", "_____no_output_____" ] ], [ [ "movi = movie_rdd.filter(lambda x: x[0]!='movieId')\n#all_users = users.filter(lambda x: x[0:-2])\nall_movi = movi.map(lambda x: (x[0],x[1]))\nall_movi.collect()", "_____no_output_____" ] ], [ [ "## filtering out users using condition that movie ID should be equal to given movie ID and user ID should not be given user ID because we dont want those movies which user has already seen", "_____no_output_____" ] ], [ [ "users = ratings_rdd.filter(lambda x: x[1]==movieID and x[0]!=userID)\n#all_users = users.filter(lambda x: x[0:-2])\nall_users = users.map(lambda x: (x[0],1))\nall_users.collect()", "_____no_output_____" ] ], [ [ "## Here I have filtered out all movies which user has seen by filtering user ID from from RDD and selected only two columns user ID and movie ID", "_____no_output_____" ] ], [ [ "users1 = ratings_rdd.filter(lambda x: x[0]!=userID)\n#all_users1 = users.filter(lambda x: x[0:-2])\nall_users1 = users1.map(lambda x: (x[0],x[1]))\nall_users1.collect()", "_____no_output_____" ] ], [ [ "## joining all users who have seen given movie with RDD with all users and movies list\n## and selected only movie column and made a tuple (x,1) and then reduced it by key to get a count of users who have seen that particular movie. 
Finally we have a list of movies and users count for that movie", "_____no_output_____" ] ], [ [ "movies_list = all_users.join(all_users1).map(lambda x: (x[1][1],1)).reduceByKey(lambda amt1,amt2 : amt1+amt2)\nmovies_list.collect()", "_____no_output_____" ] ], [ [ "# Calculate average rating of a movie\n\n## first filtered out given user and header from RDD and then selected only movie ID and rating", "_____no_output_____" ] ], [ [ "ratings = ratings_rdd.filter(lambda x: (x[0]!=userID and x[0]!='userId'))\n#all_users1 = users.filter(lambda x: x[0:-2])\nall_ratings = ratings.map(lambda x: (x[1],x[2]))\nall_ratings.collect()", "_____no_output_____" ] ], [ [ "## Now, join movie list with rating RDD above \n## and selected only movie ID and reduced it by key to get count (This count will be used to get average rating)", "_____no_output_____" ] ], [ [ "count = movies_list.join(all_ratings).map(lambda x: (x[0],1)).reduceByKey(lambda amt1,amt2 : amt1+amt2)\ncount.collect()", "_____no_output_____" ] ], [ [ "## again join those two rdds and get total count of ratings", "_____no_output_____" ] ], [ [ "avg_ratings = movies_list.join(all_ratings).map(lambda x: (x[0],x[1][1])).reduceByKey(lambda amt1,amt2 : float(amt1)+float(amt2))\navg_ratings.collect()", "_____no_output_____" ] ], [ [ "## And join both RDDs with total ratings and total count to get average rating for that movie and sort it on based of ratings", "_____no_output_____" ] ], [ [ "avg_by_key = avg_ratings.join(count).map(lambda x: (x[0],(float(x[1][0])/float(x[1][1]))))\navg_by_key = avg_by_key.sortBy(lambda x: x[1], False)\navg_by_key.collect()", "_____no_output_____" ] ], [ [ "## Finally, join above RDD with movie name RDD to get the movie name which are recommended for users and select TOP 5", "_____no_output_____" ] ], [ [ "final_recom = avg_by_key.join(all_movi).map(lambda x: (x[1][0],x[1][1]))\nfinal_recom = final_recom.sortBy(lambda x: x[0], False)\n#final_recom.map(lambda x: (x[1]))\nfinal_recom.map\nfinal_recom.top(5)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a79d5be04bdcd96e643287abfa2593856c3745f
4,081
ipynb
Jupyter Notebook
esip-workshop/other-notebooks/Nexus Deployment and Ingestion.ipynb
dataplumber/nexus
f25a89e85eba098da9c6db1ff3d408dae8a6b310
[ "Apache-2.0" ]
23
2016-08-09T22:45:14.000Z
2020-02-17T08:18:29.000Z
esip-workshop/other-notebooks/Nexus Deployment and Ingestion.ipynb
lewismc/incubator-sdap-nexus
ff98fa346303431542b8391cc2a1bf7561d1bd03
[ "Apache-2.0" ]
6
2017-04-27T21:22:17.000Z
2021-06-01T21:45:52.000Z
esip-workshop/other-notebooks/Nexus Deployment and Ingestion.ipynb
dataplumber/nexus
f25a89e85eba098da9c6db1ff3d408dae8a6b310
[ "Apache-2.0" ]
5
2016-08-31T13:47:29.000Z
2017-11-14T21:45:22.000Z
20.611111
196
0.555011
[ [ [ "Step 1: Sign up / get assigned EC2 instance <br>\nStep 2: SSH into EC2 instance <br>\nStep 3: Start up the cluster by running the following commands <br>", "_____no_output_____" ] ], [ [ "cd ~/nexus/esip-workshop/docker/infrastructure\ndocker-compose up -d cassandra1\ndocker logs -f cassandra1", "_____no_output_____" ] ], [ [ "Step 4: Wait to see \"INFO 01:57:02 Starting listening for CQL clients on /0.0.0.0:9042…\" and then run", "_____no_output_____" ] ], [ [ "docker-compose up -d", "_____no_output_____" ] ], [ [ "Step 5: Verify services are running using", "_____no_output_____" ] ], [ [ "docker ps", "_____no_output_____" ] ], [ [ "There should be 9 containers running: 3 solr, 3 zookeeper, and 3 cassandra. If they are not all running, run docker-compose up -d again.", "_____no_output_____" ], [ "Step 6: Start the analysis cluster", "_____no_output_____" ] ], [ [ "cd ~/nexus/esip-workshop/docker/analysis\ndocker-compose up -d", "_____no_output_____" ] ], [ [ "Step 7: Verify services are running using", "_____no_output_____" ] ], [ [ "docker ps", "_____no_output_____" ] ], [ [ "There should be an additional 5 containers running: 3 mesos-agent, 1 mesos-master, 1 nexus-webapp", "_____no_output_____" ], [ "Step 8: Start the ingestion cluster", "_____no_output_____" ] ], [ [ "cd ~/nexus/esip-workshop/docker/ingest\ndocker-compose up -d", "_____no_output_____" ] ], [ [ "Step 9: Verify services are running using", "_____no_output_____" ] ], [ [ "docker ps", "_____no_output_____" ] ], [ [ "There should be an additional 9 containers running: 3 kafka, 3 xd-container, 1 redis, 1 mysqldb, 1 xd-admin", "_____no_output_____" ], [ "Ingest 2017 AVHRR data that has been staged under /home/ndeploy/ingest/data/avhrr/2017 <br>\n<br>\nStep 10: Deploy the stream to Spring XD using nx-deploy-stream.sh available inside xd-admin container", "_____no_output_____" ] ], [ [ "docker exec -it xd-admin /usr/local/nx-deploy-stream.sh --datasetName AVHRR_OI_L4_GHRSST_NCEI --dataDirectory /usr/local/data/nexus/avhrr/2017 --variableName analysed_sst --tilesDesired 1296", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
4a79dabf555494642fcdeabd0a622a21e28bb179
327,399
ipynb
Jupyter Notebook
content/ch-states/atoms-computation.ipynb
duartefrazao/qiskit-textbook
bce4bde0823e1f0c5a96776179c029f09e47b353
[ "Apache-2.0" ]
1
2021-07-23T08:01:44.000Z
2021-07-23T08:01:44.000Z
content/ch-states/atoms-computation.ipynb
duartefrazao/qiskit-textbook
bce4bde0823e1f0c5a96776179c029f09e47b353
[ "Apache-2.0" ]
1
2022-02-26T12:23:30.000Z
2022-02-26T12:23:30.000Z
content/ch-states/atoms-computation.ipynb
duartefrazao/qiskit-textbook
bce4bde0823e1f0c5a96776179c029f09e47b353
[ "Apache-2.0" ]
2
2021-03-14T12:30:28.000Z
2021-03-15T14:59:11.000Z
42.920687
565
0.513474
[ [ [ "# The Atoms of Computation", "_____no_output_____" ], [ " \nProgramming a quantum computer is now something that anyone can do in the comfort of their own home.\n\nBut what to create? What is a quantum program anyway? In fact, what is a quantum computer?\n\n\nThese questions can be answered by making comparisons to standard digital computers. Unfortunately, most people don’t actually understand how digital computers work either. In this article, we’ll look at the basics principles behind these devices. To help us transition over to quantum computing later on, we’ll do it using the same tools as we'll use for quantum.", "_____no_output_____" ], [ "## Contents\n\n1. [Splitting information into bits](#bits) \n2. [Computation as a Diagram](#diagram) \n3. [Your First Quantum Circuit](#first-circuit) \n4. [Example: Adder Circuit](#adder) \n 4.1 [Encoding an Input](#encoding) \n 4.2 [Remembering how to Add](#remembering-add) \n 4.3 [Adding with Qiskit](#adding-qiskit) ", "_____no_output_____" ], [ "Below is some Python code we'll need to run if we want to use the code in this page:", "_____no_output_____" ] ], [ [ "from qiskit import QuantumCircuit, assemble, Aer\nfrom qiskit.visualization import plot_histogram", "_____no_output_____" ] ], [ [ "## 1. Splitting information into bits <a id=\"bits\"></a>", "_____no_output_____" ], [ "The first thing we need to know about is the idea of bits. These are designed to be the world’s simplest alphabet. With only two characters, 0 and 1, we can represent any piece of information.\n\nOne example is numbers. You are probably used to representing a number through a string of the ten digits 0, 1, 2, 3, 4, 5, 6, 7, 8, and 9. In this string of digits, each digit represents how many times the number contains a certain power of ten. For example, when we write 9213, we mean\n\n\n\n$$ 9000 + 200 + 10 + 3 $$\n\n\n\nor, expressed in a way that emphasizes the powers of ten\n\n\n\n$$ (9\\times10^3) + (2\\times10^2) + (1\\times10^1) + (3\\times10^0) $$\n\n\n\nThough we usually use this system based on the number 10, we can just as easily use one based on any other number. The binary number system, for example, is based on the number two. This means using the two characters 0 and 1 to express numbers as multiples of powers of two. For example, 9213 becomes 10001111111101, since\n\n\n\n$$ 9213 = (1 \\times 2^{13}) + (0 \\times 2^{12}) + (0 \\times 2^{11})+ (0 \\times 2^{10}) +(1 \\times 2^9) + (1 \\times 2^8) + (1 \\times 2^7) \\\\\\\\ \\,\\,\\, + (1 \\times 2^6) + (1 \\times 2^5) + (1 \\times 2^4) + (1 \\times 2^3) + (1 \\times 2^2) + (0 \\times 2^1) + (1 \\times 2^0) $$\n\n\n\nIn this we are expressing numbers as multiples of 2, 4, 8, 16, 32, etc. instead of 10, 100, 1000, etc.\n<a id=\"binary_widget\"></a>", "_____no_output_____" ] ], [ [ "from qiskit_textbook.widgets import binary_widget\nbinary_widget(nbits=5)", "_____no_output_____" ] ], [ [ "These strings of bits, known as binary strings, can be used to represent more than just numbers. For example, there is a way to represent any text using bits. For any letter, number, or punctuation mark you want to use, you can find a corresponding string of at most eight bits using [this table](https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/com.ibm.aix.networkcomm/conversion_table.htm). Though these are quite arbitrary, this is a widely agreed-upon standard. In fact, it's what was used to transmit this article to you through the internet.\n\nThis is how all information is represented in computers. 
Whether numbers, letters, images, or sound, it all exists in the form of binary strings.\n\nLike our standard digital computers, quantum computers are based on this same basic idea. The main difference is that they use *qubits*, an extension of the bit to quantum mechanics. In the rest of this textbook, we will explore what qubits are, what they can do, and how they do it. In this section, however, we are not talking about quantum at all. So, we just use qubits as if they were bits.", "_____no_output_____" ], [ "### Quick Exercises\n1. Think of a number and try to write it down in binary.\n2. If you have $n$ bits, how many different states can they be in?", "_____no_output_____" ], [ "## 2. Computation as a diagram <a id=\"diagram\"></a>\n\nWhether we are using qubits or bits, we need to manipulate them in order to turn the inputs we have into the outputs we need. For the simplest programs with very few bits, it is useful to represent this process in a diagram known as a *circuit diagram*. These have inputs on the left, outputs on the right, and operations represented by arcane symbols in between. These operations are called 'gates', mostly for historical reasons.\n\nHere's an example of what a circuit looks like for standard, bit-based computers. You aren't expected to understand what it does. It should simply give you an idea of what these circuits look like.\n\n![image1](images/classical_circuit.png)\n\nFor quantum computers, we use the same basic idea but have different conventions for how to represent inputs, outputs, and the symbols used for operations. Here is the quantum circuit that represents the same process as above.\n\n![image2](images/quantum_circuit.png)\n\nIn the rest of this section, we will explain how to build circuits. At the end, you'll know how to create the circuit above, what it does, and why it is useful.", "_____no_output_____" ], [ "## 3. Your first quantum circuit <a id=\"first-circuit\"></a>", "_____no_output_____" ], [ "In a circuit, we typically need to do three jobs: First, encode the input, then do some actual computation, and finally extract an output. For your first quantum circuit, we'll focus on the last of these jobs. We start by creating a circuit with eight qubits and eight outputs.", "_____no_output_____" ] ], [ [ "n = 8\nn_q = n\nn_b = n\nqc_output = QuantumCircuit(n_q,n_b)", "_____no_output_____" ] ], [ [ "This circuit, which we have called `qc_output`, is created by Qiskit using `QuantumCircuit`. The number `n_q` defines the number of qubits in the circuit. With `n_b` we define the number of output bits we will extract from the circuit at the end.\n\nThe extraction of outputs in a quantum circuit is done using an operation called `measure`. Each measurement tells a specific qubit to give an output to a specific output bit. The following code adds a `measure` operation to each of our eight qubits. The qubits and bits are both labelled by the numbers from 0 to 7 (because that’s how programmers like to do things). The command `qc.measure(j,j)` adds a measurement to our circuit `qc` that tells qubit `j` to write an output to bit `j`.", "_____no_output_____" ] ], [ [ "for j in range(n):\n qc_output.measure(j,j)", "_____no_output_____" ] ], [ [ "Now that our circuit has something in it, let's take a look at it.", "_____no_output_____" ] ], [ [ "qc_output.draw()", "_____no_output_____" ] ], [ [ "Qubits are always initialized to give the output ```0```. 
Since we don't do anything to our qubits in the circuit above, this is exactly the result we'll get when we measure them. We can see this by running the circuit many times and plotting the results in a histogram. We will find that the result is always ```00000000```: a ```0``` from each qubit.", "_____no_output_____" ] ], [ [ "sim = Aer.get_backend('qasm_simulator') # this is the simulator we'll use\nqobj = assemble(qc_output) # this turns the circuit into an object our backend can run\nresult = sim.run(qobj).result() # we run the experiment and get the result from that experiment\n# from the results, we get a dictionary containing the number of times (counts)\n# each result appeared\ncounts = result.get_counts()\n# and display it on a histogram\nplot_histogram(counts)", "_____no_output_____" ] ], [ [ "The reason for running many times and showing the result as a histogram is because quantum computers may have some randomness in their results. In this case, since we aren’t doing anything quantum, we get just the ```00000000``` result with certainty.\n\nNote that this result comes from a quantum simulator, which is a standard computer calculating what an ideal quantum computer would do. Simulations are only possible for small numbers of qubits (~30 qubits), but they are nevertheless a very useful tool when designing your first quantum circuits. To run on a real device you simply need to replace ```Aer.get_backend('qasm_simulator')``` with the backend object of the device you want to use. ", "_____no_output_____" ], [ "## 4. Example: Creating an Adder Circuit <a id=\"adder\"></a>\n### 4.1 Encoding an input <a id=\"encoding\"></a>\n\nNow let's look at how to encode a different binary string as an input. For this, we need what is known as a NOT gate. This is the most basic operation that you can do in a computer. It simply flips the bit value: ```0``` becomes ```1``` and ```1``` becomes ```0```. For qubits, it is an operation called ```x``` that does the job of the NOT.\n\nBelow we create a new circuit dedicated to the job of encoding and call it `qc_encode`. For now, we only specify the number of qubits.", "_____no_output_____" ] ], [ [ "qc_encode = QuantumCircuit(n)\nqc_encode.x(7)\nqc_encode.draw()", "_____no_output_____" ] ], [ [ "Extracting results can be done using the circuit we have from before: `qc_output`. Adding the two circuits using `qc_encode + qc_output` creates a new circuit with everything needed to extract an output added at the end.", "_____no_output_____" ] ], [ [ "qc = qc_encode + qc_output\nqc.draw()", "_____no_output_____" ] ], [ [ "Now we can run the combined circuit and look at the results.", "_____no_output_____" ] ], [ [ "qobj = assemble(qc)\ncounts = sim.run(qobj).result().get_counts()\nplot_histogram(counts)", "_____no_output_____" ] ], [ [ "Now our computer outputs the string ```10000000``` instead.\n\nThe bit we flipped, which comes from qubit 7, lives on the far left of the string. This is because Qiskit numbers the bits in a string from right to left. Some prefer to number their bits the other way around, but Qiskit's system certainly has its advantages when we are using the bits to represent numbers. Specifically, it means that qubit 7 is telling us about how many $2^7$s we have in our number. So by flipping this bit, we’ve now written the number 128 in our simple 8-bit computer.\n\nNow try out writing another number for yourself. You could do your age, for example. 
Just use a search engine to find out what the number looks like in binary (if it includes a ‘0b’, just ignore it), and then add some 0s to the left side if you are younger than 64.", "_____no_output_____" ] ], [ [ "qc_encode = QuantumCircuit(n)\nqc_encode.x(1)\nqc_encode.x(5)\n\nqc_encode.draw()", "_____no_output_____" ] ], [ [ "Now we know how to encode information in a computer. The next step is to process it: To take an input that we have encoded, and turn it into an output that we need.", "_____no_output_____" ], [ "### 4.2 Remembering how to add <a id=\"remembering-add\"></a>", "_____no_output_____" ], [ "To look at turning inputs into outputs, we need a problem to solve. Let’s do some basic maths. In primary school, you will have learned how to take large mathematical problems and break them down into manageable pieces. For example, how would you go about solving the following?\n\n```\n 9213\n+ 1854\n= ????\n```\n\nOne way is to do it digit by digit, from right to left. So we start with 3+4\n```\n 9213\n+ 1854\n= ???7\n```\n\nAnd then 1+5\n```\n 9213\n+ 1854\n= ??67\n```\n\nThen we have 2+8=10. Since this is a two digit answer, we need to carry the one over to the next column.\n\n```\n 9213\n+ 1854\n= ?067\n ¹ \n```\n\nFinally we have 9+1+1=11, and get our answer\n\n```\n 9213\n+ 1854\n= 11067\n ¹ \n```\n\nThis may just be simple addition, but it demonstrates the principles behind all algorithms. Whether the algorithm is designed to solve mathematical problems or process text or images, we always break big tasks down into small and simple steps.\n\nTo run on a computer, algorithms need to be compiled down to the smallest and simplest steps possible. To see what these look like, let’s do the above addition problem again but in binary.\n\n\n```\n 10001111111101\n+ 00011100111110\n \n= ??????????????\n```\n\nNote that the second number has a bunch of extra 0s on the left. This just serves to make the two strings the same length.\n\nOur first task is to do the 1+0 for the column on the right. In binary, as in any number system, the answer is 1. We get the same result for the 0+1 of the second column.\n\n```\n 10001111111101\n+ 00011100111110\n\n= ????????????11 \n```\n\nNext, we have 1+1. As you’ll surely be aware, 1+1=2. In binary, the number 2 is written ```10```, and so requires two bits. This means that we need to carry the 1, just as we would for the number 10 in decimal.\n\n```\n 10001111111101\n+ 00011100111110\n= ???????????011 \n ¹ \n```\n\nThe next column now requires us to calculate ```1+1+1```. This means adding three numbers together, so things are getting complicated for our computer. But we can still compile it down to simpler operations, and do it in a way that only ever requires us to add two bits together. For this, we can start with just the first two 1s.\n\n```\n 1\n+ 1\n= 10\n```\n\nNow we need to add this ```10``` to the final ```1``` , which can be done using our usual method of going through the columns.\n\n```\n 10\n+ 01\n= 11\n```\n\nThe final answer is ```11``` (also known as 3).\n\nNow we can get back to the rest of the problem. With the answer of ```11```, we have another carry bit.\n\n```\n 10001111111101\n+ 00011100111110\n= ??????????1011\n ¹¹\n```\n\nSo now we have another 1+1+1 to do. But we already know how to do that, so it’s not a big deal.\n\nIn fact, everything left so far is something we already know how to do. This is because, if you break everything down into adding just two bits, there are only four possible things you’ll ever need to calculate. 
Here are the four basic sums (we’ll write all the answers with two bits to be consistent).\n\n```\n0+0 = 00 (in decimal, this is 0+0=0)\n0+1 = 01 (in decimal, this is 0+1=1)\n1+0 = 01 (in decimal, this is 1+0=1)\n1+1 = 10 (in decimal, this is 1+1=2)\n```\n\nThis is called a *half adder*. If our computer can implement this, and if it can chain many of them together, it can add anything.", "_____no_output_____" ], [ "### 4.3 Adding with Qiskit <a id=\"adding-qiskit\"></a>", "_____no_output_____" ], [ "Let's make our own half adder using Qiskit. This will include a part of the circuit that encodes the input, a part that executes the algorithm, and a part that extracts the result. The first part will need to be changed whenever we want to use a new input, but the rest will always remain the same.", "_____no_output_____" ], [ "![half adder implemented on a quantum circuit](images/half-adder.svg)\n", "_____no_output_____" ], [ "The two bits we want to add are encoded in the qubits 0 and 1. The above example encodes a ```1``` in both these qubits, and so it seeks to find the solution of ```1+1```. The result will be a string of two bits, which we will read out from the qubits 2 and 3. All that remains is to fill in the actual program, which lives in the blank space in the middle.\n\nThe dashed lines in the image are just to distinguish the different parts of the circuit (although they can have more interesting uses too). They are made by using the `barrier` command.\n\nThe basic operations of computing are known as logic gates. We’ve already used the NOT gate, but this is not enough to make our half adder. We could only use it to manually write out the answers. Since we want the computer to do the actual computing for us, we’ll need some more powerful gates.\n\nTo see what we need, let’s take another look at what our half adder needs to do.\n\n```\n0+0 = 00\n0+1 = 01\n1+0 = 01\n1+1 = 10\n```\n\nThe rightmost bit in all four of these answers is completely determined by whether the two bits we are adding are the same or different. So for ```0+0``` and ```1+1```, where the two bits are equal, the rightmost bit of the answer comes out ```0```. For ```0+1``` and ```1+0```, where we are adding different bit values, the rightmost bit is ```1```.\n\nTo get this part of our solution correct, we need something that can figure out whether two bits are different or not. Traditionally, in the study of digital computation, this is called an XOR gate.\n\n| Input 1 | Input 2 | XOR Output |\n|:-------:|:-------:|:------:|\n| 0 | 0 | 0 |\n| 0 | 1 | 1 |\n| 1 | 0 | 1 |\n| 1 | 1 | 0 |\n\nIn quantum computers, the job of the XOR gate is done by the controlled-NOT gate. Since that's quite a long name, we usually just call it the CNOT. In Qiskit its name is ```cx```, which is even shorter. In circuit diagrams, it is drawn as in the image below.", "_____no_output_____" ] ], [ [ "qc_cnot = QuantumCircuit(2)\nqc_cnot.cx(0,1)\nqc_cnot.draw()", "_____no_output_____" ] ], [ [ "This is applied to a pair of qubits. One acts as the control qubit (this is the one with the little dot). The other acts as the *target qubit* (with the big circle).\n\nThere are multiple ways to explain the effect of the CNOT. One is to say that it looks at its two input bits to see whether they are the same or different. Next, it overwrites the target qubit with the answer. 
The target becomes ```0``` if they are the same, and ```1``` if they are different.\n\n<img src=\"images/cnot_xor.svg\">\n\nAnother way of explaining the CNOT is to say that it does a NOT on the target if the control is ```1```, and does nothing otherwise. This explanation is just as valid as the previous one (in fact, it’s the one that gives the gate its name).\n\nTry the CNOT out for yourself by trying each of the possible inputs. For example, here's a circuit that tests the CNOT with the input ```01```.", "_____no_output_____" ] ], [ [ "qc = QuantumCircuit(2,2)\nqc.x(0)\nqc.cx(0,1)\nqc.measure(0,0)\nqc.measure(1,1)\nqc.draw()", "_____no_output_____" ] ], [ [ "If you execute this circuit, you’ll find that the output is ```11```. We can think of this happening because of either of the following reasons.\n\n- The CNOT calculates whether the input values are different and finds that they are, which means that it wants to output ```1```. It does this by writing over the state of qubit 1 (which, remember, is on the left of the bit string), turning ```01``` into ```11```.\n\n- The CNOT sees that qubit 0 is in state ```1```, and so applies a NOT to qubit 1. This flips the ```0``` of qubit 1 into a ```1```, and so turns ```01``` into ```11```.\n\nHere is a table showing all the possible inputs and corresponding outputs of the CNOT gate:\n\n| Input (q1 q0) | Output (q1 q0) |\n|:-------------:|:--------------:|\n| 00 | 00 |\n| 01 | 11 |\n| 10 | 10 |\n| 11 | 01 |\n\nFor our half adder, we don’t want to overwrite one of our inputs. Instead, we want to write the result on a different pair of qubits. For this, we can use two CNOTs.", "_____no_output_____" ] ], [ [ "qc_ha = QuantumCircuit(4,2)\n# encode inputs in qubits 0 and 1\nqc_ha.x(0) # For a=0, remove this line. For a=1, leave it.\nqc_ha.x(1) # For b=0, remove this line. For b=1, leave it.\nqc_ha.barrier()\n# use cnots to write the XOR of the inputs on qubit 2\nqc_ha.cx(0,2)\nqc_ha.cx(1,2)\nqc_ha.barrier()\n# extract outputs\nqc_ha.measure(2,0) # extract XOR value\nqc_ha.measure(3,1)\n\nqc_ha.draw()", "_____no_output_____" ] ], [ [ "We are now halfway to a fully working half adder. We just have the other bit of the output left to do: the one that will live on qubit 3.\n\nIf you look again at the four possible sums, you’ll notice that there is only one case for which this is ```1``` instead of ```0```: ```1+1```=```10```. It happens only when both the bits we are adding are ```1```.\n\nTo calculate this part of the output, we could just get our computer to look at whether both of the inputs are ```1```. If they are — and only if they are — we need to do a NOT gate on qubit 3. That will flip it to the required value of ```1``` for this case only, giving us the output we need.\n\nFor this, we need a new gate: like a CNOT but controlled on two qubits instead of just one. This will perform a NOT on the target qubit only when both controls are in state ```1```. This new gate is called the *Toffoli*. For those of you who are familiar with Boolean logic gates, it is basically an AND gate.\n\nIn Qiskit, the Toffoli is represented with the `ccx` command.", "_____no_output_____" ] ], [ [ "qc_ha = QuantumCircuit(4,2)\n# encode inputs in qubits 0 and 1\nqc_ha.x(0) # For a=0, remove this line. For a=1, leave it.\nqc_ha.x(1) # For b=0, remove this line. 
For b=1, leave it.\nqc_ha.barrier()\n# use cnots to write the XOR of the inputs on qubit 2\nqc_ha.cx(0,2)\nqc_ha.cx(1,2)\n# use ccx to write the AND of the inputs on qubit 3\nqc_ha.ccx(0,1,3)\nqc_ha.barrier()\n# extract outputs\nqc_ha.measure(2,0) # extract XOR value\nqc_ha.measure(3,1) # extract AND value\n\nqc_ha.draw()", "_____no_output_____" ] ], [ [ "In this example, we are calculating ```1+1```, because the two input bits are both ```1```. Let's see what we get.", "_____no_output_____" ] ], [ [ "qobj = assemble(qc_ha)\ncounts = sim.run(qobj).result().get_counts()\nplot_histogram(counts)", "_____no_output_____" ] ], [ [ "The result is ```10```, which is the binary representation of the number 2. We have built a computer that can solve the famous mathematical problem of 1+1!\n\nNow you can try it out with the other three possible inputs, and show that our algorithm gives the right results for those too.\n\nThe half adder contains everything you need for addition. With the NOT, CNOT, and Toffoli gates, we can create programs that add any set of numbers of any size.\n\nThese three gates are enough to do everything else in computing too. In fact, we can even do without the CNOT. Additionally, the NOT gate is only really needed to create bits with value ```1```. The Toffoli gate is essentially the atom of mathematics. It is the simplest element, from which every other problem-solving technique can be compiled.\n\nAs we'll see, in quantum computing we split the atom.", "_____no_output_____" ] ], [ [ "import qiskit\nqiskit.__qiskit_version__", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a79e66fb503b7e13c8aa32fbc9f71c4b1b19df6
71,956
ipynb
Jupyter Notebook
1_1_user_selection.ipynb
socialcomplab/recsys21-relistening-actr
4b677e55a4afd977326f45800e4693fca6d3c3f5
[ "MIT" ]
2
2021-08-05T08:07:30.000Z
2021-09-29T00:33:21.000Z
1_1_user_selection.ipynb
socialcomplab/recsys21-relistening-actr
4b677e55a4afd977326f45800e4693fca6d3c3f5
[ "MIT" ]
null
null
null
1_1_user_selection.ipynb
socialcomplab/recsys21-relistening-actr
4b677e55a4afd977326f45800e4693fca6d3c3f5
[ "MIT" ]
null
null
null
125.797203
16,160
0.882831
[ [ [ "%cd -q data/actr_reco", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nimport tqdm\nimport numpy as np", "_____no_output_____" ], [ "with open(\"users.txt\", \"r\") as f:\n users = f.readlines()", "_____no_output_____" ], [ "hist = []\nfor user in tqdm.tqdm(users):\n user = user.strip()\n ret = !wc -l user_split/listening_events_2019_{user}.tsv\n lc, _ = ret[0].split(\" \")\n hist.append(int(lc))\nlen(hist), sum(hist)", "100%|██████████| 18316/18316 [16:21<00:00, 18.66it/s]\n" ], [ "plt.hist(hist, bins=100)\nplt.show()", "_____no_output_____" ], [ "subset = [x for x in hist if x < 30_000 and x >= 1_000]\nlen(subset)", "_____no_output_____" ], [ "plt.hist(subset, bins=100)\nplt.show()", "_____no_output_____" ], [ "plt.hist(subset, bins=5)\nplt.show()", "_____no_output_____" ], [ "plt.hist(subset, bins=10)\nplt.show()", "_____no_output_____" ], [ "plt.hist(subset, bins=10)", "_____no_output_____" ] ], [ [ "# Stratification", "_____no_output_____" ] ], [ [ "def stratification_numbers(data, min_value, max_value, bins, num_samples):\n subset = [x for x in data if x >= min_value and x < max_value]\n percentage = num_samples / len(subset)\n bin_size = int((max_value-min_value)/bins)\n \n num_per_bin = []\n old_boundary = min_value\n for new_boundary in range(min_value+bin_size, max_value+1, bin_size):\n data_in_bin = [x for x in subset if x >= old_boundary and x < new_boundary]\n num_per_bin.append(len(data_in_bin))\n old_boundary = new_boundary\n assert sum(num_per_bin) == len(subset)\n \n samples_per_bin = np.array(num_per_bin)*percentage\n floor_samples_per_bin = np.floor(samples_per_bin)\n error = int(round(sum(samples_per_bin) - sum(floor_samples_per_bin)))\n if error == 0:\n assert sum(floor_samples_per_bin) == num_samples\n return floor_samples_per_bin\n \n remainders = np.remainder(samples_per_bin, 1)\n to_adjust = np.argsort(remainders)[::-1][:error]\n for ta in to_adjust:\n floor_samples_per_bin[ta] += 1\n \n assert sum(floor_samples_per_bin) == num_samples\n return floor_samples_per_bin", "_____no_output_____" ], [ "samples_per_bin = stratification_numbers(hist, 1_000, 30_000, 10, num_samples=100)\nsamples_per_bin, sum(samples_per_bin)", "_____no_output_____" ], [ "stratification_numbers(hist, 1_000, 30_000, 10, 2)", "_____no_output_____" ] ], [ [ "# Iterative Stratified Sampling", "_____no_output_____" ] ], [ [ "test_hist = hist[len(test_users):]\nassert len(test_hist) == len(test_users)", "_____no_output_____" ], [ "test_user_interaction = list(zip(test_users, test_hist))\ntest_user_interaction[:2]", "_____no_output_____" ], [ "!wc -l user_split/listening_events_2019_61740.tsv", "7678 user_split/listening_events_2019_61740.tsv\n" ], [ "def get_bin_boundaries_from_config(bin_config=None):\n if not bin_config:\n bin_config = {\"min_value\": 1_000, \"max_value\": 30_000, \"bins\": 10}\n bin_size = int((bin_config[\"max_value\"]-bin_config[\"min_value\"])/bin_config[\"bins\"])\n return list(range(bin_config[\"min_value\"], bin_config[\"max_value\"]+1, bin_size))", "_____no_output_____" ], [ "def check_in_bin(item_value, target_bin, bin_config=None):\n bin_boundaries = get_bin_boundaries_from_config()\n return item_value >= bin_boundaries[target_bin] and item_value < bin_boundaries[target_bin+1]\n\nassert check_in_bin(2400, 0)\nassert not check_in_bin(5000, 0)\nassert check_in_bin(29_000, 9)", "_____no_output_____" ], [ "def get_next_for_bin(user_interactions, target_bin):\n iterlist = user_interactions.copy()\n for ui in user_interactions:\n if check_in_bin(ui[1], 
target_bin):\n iterlist.remove(ui)\n return ui[0], iterlist\n raise StopIteration(\"No remaining items for bin.\")", "_____no_output_____" ], [ "def list_index_difference(list1, list2):\n changed_indices = []\n for index, (first, second) in enumerate(zip(list1, list2)):\n if first != second:\n changed_indices.append(index)\n return changed_indices\n\nassert list_index_difference([0,1], [0,0]) == [1]", "_____no_output_____" ], [ "def iterative_sampling(user_interactions, max_size=1000, num_bins=10):\n iterlist = user_interactions.copy()\n bins = num_bins*[0]\n \n sampled_list = []\n \n mult_index_changes = []\n \n for i in tqdm.tqdm(range(1, max_size+1)):\n updated_bins = stratification_numbers(hist, 1_000, 30_000, 10, num_samples=i)\n changed_indices = list_index_difference(bins, updated_bins)\n if len(changed_indices) != 1:\n mult_index_changes.append(i)\n# print(f\"Multi-index change at pos {i}: {changed_indices} (old: {bins} vs new: {updated_bins}\")\n target_bin = changed_indices[0] # empirically increase the first change index, assuming items are in descending order\n bins[target_bin] += 1\n \n item, iterlist = get_next_for_bin(iterlist, target_bin)\n sampled_list.append(item)\n \n print(len(mult_index_changes))\n print(mult_index_changes[-3:])\n print(bins)\n return sampled_list", "_____no_output_____" ], [ "sampled_list = iterative_sampling(test_user_interaction, 150)\nlen(sampled_list)", "100%|██████████| 300/300 [00:02<00:00, 110.74it/s]" ], [ "# overlap\nlen(set(test_users[:300]).intersection(set(sampled_list[:150])))", "_____no_output_____" ], [ "with open(\"sampled.txt\", \"w\") as f:\n f.write(\"\".join(sampled_list))", "_____no_output_____" ], [ "!head sampled.txt", "103807\n73151\n61740\n37608\n30387\n90919\n35812\n28952\n94584\n95562\n" ], [ "!wc -l sampled.txt", "300 sampled.txt\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a79ed19e775a2bdb2829741deec13a86b1d39e7
54,049
ipynb
Jupyter Notebook
notebooks/minneapolis.ipynb
jkeefe/nypd-data
db258ca83dd6eb3d3864f0a3c5624ccf7e7c44af
[ "MIT" ]
3
2020-09-12T17:52:00.000Z
2021-01-09T18:21:21.000Z
notebooks/minneapolis.ipynb
jkeefe/nypd-data
db258ca83dd6eb3d3864f0a3c5624ccf7e7c44af
[ "MIT" ]
1
2021-08-23T20:59:04.000Z
2021-08-23T20:59:04.000Z
notebooks/minneapolis.ipynb
jkeefe/nypd-data
db258ca83dd6eb3d3864f0a3c5624ccf7e7c44af
[ "MIT" ]
3
2020-08-03T02:56:16.000Z
2022-02-21T21:29:38.000Z
33.591672
489
0.329664
[ [ [ "import pandas as pd", "_____no_output_____" ] ], [ [ "## Load in the \"rosetta stone\" file\n\nI made this file using QGIS, the open-source mapping software. I loaded in the US Census 2010 block-level shapefile for Hennipin County. I then used the block centroids, provided by the census, to colect them within each zone. Since the centroids, by nature, are a \"half a block\" from the nearest street, this is more reliable than a polygon-in-polygon calculation. I then inspected the map visually for outliers.\n\nI'll write up my steps for that soonest.", "_____no_output_____" ] ], [ [ "rosetta_df = pd.read_csv('../data/minneapolis/rosetta_nabes.csv')", "_____no_output_____" ], [ "rosetta_df", "_____no_output_____" ] ], [ [ "## Load in the population data", "_____no_output_____" ], [ "I downloaded the population files from [census.data.gov](https://census.data.gov). \n\nHere are the [P3 and P5 census table files for Cook County](https://s3.amazonaws.com/media.johnkeefe.net/census-by-precinct/17031_Cook_County.zip). And here is the [\"productDownload_2020-06-07T173132\" zip file](https://s3.amazonaws.com/media.johnkeefe.net/census-by-precinct/productDownload_2020-06-07T173132.zip). It's a little messy, and the census doesn't label the files well, but I'm providing them as I got them. The CSVs you need are in there! Adjust your paths accordingly.", "_____no_output_____" ] ], [ [ "# census P3 for county by block\np3_df = pd.read_csv('/Volumes/JK_Smarts_Data/precinct_project/MN/productDownload_2020-06-19T224000/DECENNIALSF12010.P3_data_with_overlays_2020-06-19T223910.csv')", "_____no_output_____" ], [ "p3_df", "_____no_output_____" ], [ "p3_df.reset_index()\np3_df.drop(0, inplace=True)", "_____no_output_____" ], [ "p5_df = pd.read_csv('/Volumes/JK_Smarts_Data/precinct_project/MN/productDownload_2020-06-19T224000/DECENNIALSF12010.P5_data_with_overlays_2020-06-19T223910.csv')", "_____no_output_____" ], [ "p5_df.reset_index()\np5_df.drop(0, inplace=True)", "_____no_output_____" ], [ "p3_df.shape, p5_df.shape", "_____no_output_____" ], [ "population_df = p3_df.merge(p5_df, on='GEO_ID')", "_____no_output_____" ], [ "population_df.shape", "_____no_output_____" ], [ "population_df", "_____no_output_____" ], [ "rosetta_df.shape", "_____no_output_____" ], [ "rosetta_df.dtypes", "_____no_output_____" ], [ "population_df.dtypes", "_____no_output_____" ], [ "population_df['GEOID10'] = population_df['GEO_ID'].str[9:].astype(int)", "_____no_output_____" ], [ "population_df.drop(columns=['NAME_y'], inplace = True)", "_____no_output_____" ], [ "## Add demographic data to each chicago PD district block\nblock_data = rosetta_df.merge(population_df, on=\"GEOID10\", how=\"left\")", "_____no_output_____" ], [ "block_data.shape", "_____no_output_____" ], [ "block_data", "_____no_output_____" ], [ "# need to make all those columns numeric\nblock_data[['P003001', 'P003002', 'P003003', 'P003004',\n 'P003005', 'P003006', 'P003007', 'P003008', 'P005001', 'P005002',\n 'P005003', 'P005004', 'P005005', 'P005006', 'P005007', 'P005008',\n 'P005009', 'P005010', 'P005011', 'P005012', 'P005013', 'P005014',\n 'P005015', 'P005016', 'P005017']] = block_data[['P003001', 'P003002', 'P003003', 'P003004',\n 'P003005', 'P003006', 'P003007', 'P003008', 'P005001', 'P005002',\n 'P005003', 'P005004', 'P005005', 'P005006', 'P005007', 'P005008',\n 'P005009', 'P005010', 'P005011', 'P005012', 'P005013', 'P005014',\n 'P005015', 'P005016', 'P005017']].apply(pd.to_numeric)", "_____no_output_____" ], [ 
"block_data.to_csv('./temp_data/mpls_2010blocks_2020nabes_population.csv', index=False)", "_____no_output_____" ] ], [ [ "-----------------------", "_____no_output_____" ], [ "**Note**: I stopped here because I'm going to publish the rest using Datasette", "_____no_output_____" ], [ "Done!", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
4a79ee0bd702a7d9b6bd3226cfaccc2279a55399
6,385
ipynb
Jupyter Notebook
docs/samples/client/kfserving_sdk_sample.ipynb
harshavardhana/kfserving
7f61564d4dd98f21b4c79150eb6901ec8a6ab557
[ "Apache-2.0" ]
2
2020-12-03T22:45:44.000Z
2020-12-28T03:52:28.000Z
docs/samples/client/kfserving_sdk_sample.ipynb
harshavardhana/kfserving
7f61564d4dd98f21b4c79150eb6901ec8a6ab557
[ "Apache-2.0" ]
22
2020-02-20T15:27:01.000Z
2022-02-10T01:26:52.000Z
docs/samples/client/kfserving_sdk_sample.ipynb
harshavardhana/kfserving
7f61564d4dd98f21b4c79150eb6901ec8a6ab557
[ "Apache-2.0" ]
2
2020-10-06T09:24:31.000Z
2020-12-20T15:10:56.000Z
26.493776
224
0.552858
[ [ [ "# Sample for KFServing SDK ", "_____no_output_____" ], [ "This is a sample for KFServing SDK. \n\nThe notebook shows how to use KFServing SDK to create, get, rollout_canary, promote and delete InferenceService.", "_____no_output_____" ] ], [ [ "from kubernetes import client\n\nfrom kfserving import KFServingClient\nfrom kfserving import constants\nfrom kfserving import utils\nfrom kfserving import V1alpha2EndpointSpec\nfrom kfserving import V1alpha2PredictorSpec\nfrom kfserving import V1alpha2TensorflowSpec\nfrom kfserving import V1alpha2InferenceServiceSpec\nfrom kfserving import V1alpha2InferenceService\nfrom kubernetes.client import V1ResourceRequirements", "_____no_output_____" ] ], [ [ "Define namespace where InferenceService needs to be deployed to. If not specified, below function defines namespace to the current one where SDK is running in the cluster, otherwise it will deploy to default namespace.", "_____no_output_____" ] ], [ [ "namespace = utils.get_default_target_namespace()", "_____no_output_____" ] ], [ [ "## Define InferenceService", "_____no_output_____" ], [ "Firstly define default endpoint spec, and then define the inferenceservice basic on the endpoint spec.", "_____no_output_____" ] ], [ [ "api_version = constants.KFSERVING_GROUP + '/' + constants.KFSERVING_VERSION\ndefault_endpoint_spec = V1alpha2EndpointSpec(\n predictor=V1alpha2PredictorSpec(\n tensorflow=V1alpha2TensorflowSpec(\n storage_uri='gs://kfserving-samples/models/tensorflow/flowers',\n resources=V1ResourceRequirements(\n requests={'cpu':'100m','memory':'1Gi'},\n limits={'cpu':'100m', 'memory':'1Gi'}))))\n \nisvc = V1alpha2InferenceService(api_version=api_version,\n kind=constants.KFSERVING_KIND,\n metadata=client.V1ObjectMeta(\n name='flower-sample', namespace=namespace),\n spec=V1alpha2InferenceServiceSpec(default=default_endpoint_spec))", "_____no_output_____" ] ], [ [ "## Create InferenceService", "_____no_output_____" ], [ "Call KFServingClient to create InferenceService.", "_____no_output_____" ] ], [ [ "KFServing = KFServingClient()\nKFServing.create(isvc)", "_____no_output_____" ] ], [ [ "## Check the InferenceService", "_____no_output_____" ] ], [ [ "KFServing.get('flower-sample', namespace=namespace, watch=True, timeout_seconds=120)", "_____no_output_____" ] ], [ [ "## Add Canary to InferenceService", "_____no_output_____" ], [ "Firstly define canary endpoint spec, and then rollout 10% traffic to the canary version, watch the rollout process.", "_____no_output_____" ] ], [ [ "canary_endpoint_spec = V1alpha2EndpointSpec(\n predictor=V1alpha2PredictorSpec(\n tensorflow=V1alpha2TensorflowSpec(\n storage_uri='gs://kfserving-samples/models/tensorflow/flowers-2',\n resources=V1ResourceRequirements(\n requests={'cpu':'100m','memory':'1Gi'},\n limits={'cpu':'100m', 'memory':'1Gi'}))))\n\nKFServing.rollout_canary('flower-sample', canary=canary_endpoint_spec, percent=10,\n namespace=namespace, watch=True, timeout_seconds=120)", "_____no_output_____" ] ], [ [ "## Rollout more traffic to canary of the InferenceService", "_____no_output_____" ], [ "Rollout traffice percent to 50% to canary version.", "_____no_output_____" ] ], [ [ "KFServing.rollout_canary('flower-sample', percent=50, namespace=namespace,\n watch=True, timeout_seconds=120)", "_____no_output_____" ] ], [ [ "## Promote Canary to Default", "_____no_output_____" ] ], [ [ "KFServing.promote('flower-sample', namespace=namespace, watch=True, timeout_seconds=120)", "_____no_output_____" ] ], [ [ "## Delete the InferenceService", 
"_____no_output_____" ] ], [ [ "KFServing.delete('flower-sample', namespace=namespace)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a7a01829a46cb05db3d34e00a02964341aa7ee6
22,876
ipynb
Jupyter Notebook
finance/assign1_soln.ipynb
kishlaya/assignments
1aa76e32d7e5059499a93359cb52118ccbf07028
[ "MIT" ]
2
2019-11-17T09:28:32.000Z
2020-08-14T17:40:34.000Z
finance/assign1_soln.ipynb
kishlaya/assignments
1aa76e32d7e5059499a93359cb52118ccbf07028
[ "MIT" ]
null
null
null
finance/assign1_soln.ipynb
kishlaya/assignments
1aa76e32d7e5059499a93359cb52118ccbf07028
[ "MIT" ]
null
null
null
44.077071
284
0.549921
[ [ [ "# Assignment Submission for FMUP\n## Kishlaya Jaiswal\n### Chennai Mathematical Institute - MCS201909\n---\n", "_____no_output_____" ], [ "# Solution 1", "_____no_output_____" ], [ "I have choosen the following stocks from Nifty50:\n- Kotak Mahindra Bank Ltd (KOTAKBANK)\n- Hindustan Unilever Ltd (HINDUNILVR)\n- Nestle India Limited (NESTLEIND)\n\n\nNote:\n- I am doing these computations on Apr 2, 2021, and hence using the closing price for this day as my strike price.\n- I am using the historical data for the month of February to find the volatility of each of these stocks (volatility computation is done at the end)\n\n", "_____no_output_____" ] ], [ [ "import QuantLib as ql\n\n\n# function to find the price and greeks for a given option\n# with it's strike/spot price and it's volatility\n\ndef find_price_greeks(spot_price, strike_price, volatility, option_type):\n # construct the European Option\n payoff = ql.PlainVanillaPayoff(option_type, strike_price)\n exercise = ql.EuropeanExercise(maturity_date)\n european_option = ql.VanillaOption(payoff, exercise)\n\n # quote the spot price\n spot_handle = ql.QuoteHandle(\n ql.SimpleQuote(spot_price)\n )\n flat_ts = ql.YieldTermStructureHandle(\n ql.FlatForward(calculation_date, risk_free_rate, day_count)\n )\n dividend_yield = ql.YieldTermStructureHandle(\n ql.FlatForward(calculation_date, dividend_rate, day_count)\n )\n flat_vol_ts = ql.BlackVolTermStructureHandle(\n ql.BlackConstantVol(calculation_date, calendar, volatility, day_count)\n )\n \n # create the Black Scholes process\n bsm_process = ql.BlackScholesMertonProcess(spot_handle, \n dividend_yield, \n flat_ts, \n flat_vol_ts)\n\n # set the engine to use the above process\n european_option.setPricingEngine(ql.AnalyticEuropeanEngine(bsm_process))\n \n return european_option\n\n\ntickers = [\"KOTAKBANK\", \"HINDUNILVR\", \"NESTLEIND\"]\n\n# spot price = closing price as on Mar 1, 2021\nspot = {\"KOTAKBANK\":1845.35, \n \"HINDUNILVR\":2144.70, \n \"NESTLEIND\":16288.20}\n\n# strike price = closing price as on Apr 2, 2021\nstrike = {\"KOTAKBANK\":1804.45, \n \"HINDUNILVR\":2399.45, \n \"NESTLEIND\":17102.15}\n\n# historical volatility from the past month's data\nvol = {\"KOTAKBANK\":0.38, \n \"HINDUNILVR\":0.15, \n \"NESTLEIND\":0.18}\n\n# date of option purchase\ncalculation_date = ql.Date(1,3,2021)\n\n# exercise date\n# this excludes the holidays in the Indian calendar\ncalendar = ql.India()\nperiod = ql.Period(65, ql.Days)\nmaturity_date = calendar.advance(calculation_date, period)\n\n# rate of interest\nrisk_free_rate = 0.06\n\n# other settings\ndividend_rate = 0.0\nday_count = ql.Actual365Fixed()\nql.Settings.instance().evaluationDate = calculation_date\n\n# store final variables for future calculations\ndelta = {}\ngamma = {}\nvega = {}\n\n# print settings\nformat_type_head = \"{:<15}\" + (\"{:<12}\" * 7)\nformat_type = \"{:<15}{:<12}\" + (\"{:<12.2f}\" * 6)\nprint(format_type_head.format(\"Name\", \"Type\", \"Price\", \"Delta\", \"Gamma\", \"Rho\", \"Theta\", \"Vega\"))\nprint()\n\nfor ticker in tickers:\n option = find_price_greeks(spot[ticker], strike[ticker], vol[ticker], ql.Option.Call)\n print(format_type.format(ticker, \"Call\", option.NPV(), \n option.delta(), option.gamma(), \n option.rho(), option.theta(), option.vega()))\n \n delta[ticker] = option.delta()\n gamma[ticker] = option.gamma()\n vega[ticker] = option.vega()\n \n option = find_price_greeks(spot[ticker], strike[ticker], vol[ticker], ql.Option.Put)\n print(format_type.format(ticker, \"Put\", option.NPV(), \n 
option.delta(), option.gamma(), \n option.rho(), option.theta(), option.vega()))\n \n print()\n", "Name Type Price Delta Gamma Rho Theta Vega \n\nKOTAKBANK Call 175.18 0.62 0.00 244.54 -323.09 356.05 \nKOTAKBANK Put 106.90 -0.38 0.00 -208.25 -216.47 356.05 \n\nHINDUNILVR Call 8.08 0.11 0.00 56.41 -72.03 199.56 \nHINDUNILVR Put 226.43 -0.89 0.00 -545.68 69.76 199.56 \n\nNESTLEIND Call 363.26 0.37 0.00 1456.88 -1442.77 3113.31 \nNESTLEIND Put 917.75 -0.63 0.00 -2834.54 -432.21 3113.31 \n\n" ] ], [ [ "### Delta Gamma Vega neutrality\n\nFirst we make the portfolio Gamma and Vega neutral by taking \n- x units of KOTAKBANK\n- y units of HINDUNILVR\n- 1 unit of NESTLEIND\n\nTo solve for x, y we have the following:", "_____no_output_____" ] ], [ [ "import numpy as np\n\nG1, G2, G3 = gamma[\"KOTAKBANK\"], gamma[\"HINDUNILVR\"], gamma[\"NESTLEIND\"]\nV1, V2, V3 = vega[\"KOTAKBANK\"], vega[\"HINDUNILVR\"], vega[\"NESTLEIND\"]\n\n# Solve the following equations:\n# G1 x + G2 y + G3 = 0\n# V1 x + V2 y + V3 = 0\n\nA = np.array([[G1, G2], [V1, V2]])\nb = np.array([-G3, -V3])\nz = np.linalg.solve(A, b)\n\nprint(\"x = {:.2f}\".format(z[0]))\nprint(\"y = {:.2f}\".format(z[1]))\nprint()\n\nfinal_delta = z[0]*delta[\"KOTAKBANK\"] + z[1]*delta[\"HINDUNILVR\"] + delta[\"NESTLEIND\"]\nprint(\"Delta of portfolio is {:.2f}\".format(final_delta))", "x = -18.46\ny = 17.34\n\nDelta of portfolio is -9.13\n" ] ], [ [ "## Final Strategy\n\n- Take a short position of 18.46 units of Kotak Mahindra Bank Ltd Call Option\n- Take a long position of 17.34 units of Hindustan Unilever Ltd Call Option\n- Take a long position of 1 unit of Nestle India Limited Call Option\n- Take a long position of 9.13 units of Nestle India Limited Stock\n\nThis will yield a portfolio that is Delta, Gamma and Vega neutral.", "_____no_output_____" ], [ "# Solution 2", "_____no_output_____" ], [ "Using Taylor expansion, we get\n$$\\Delta P = \\frac{\\partial P}{\\partial y} \\Delta y + \\frac12 \\frac{\\partial^2 P}{\\partial y^2}(\\Delta y)^2$$\n$$\\implies \\frac{\\Delta P}{P} = -D \\Delta y + \\frac12 C (\\Delta y)^2$$\n\nwhere $D$ denotes duration and $C$ denotes convexity of a bond.\n\nWe remark that the durations of the bonds we are comparing are the same and fixed.\n\n---\n\n<p>With that being said, let's say the interest rates fall; then we have $$\\Delta y < 0 \\implies - D \\Delta y + \\frac12 C (\\Delta y)^2 > 0 \\implies \\Delta P > 0$$\nNow for the bond with greater convexity, $\\frac12 C (\\Delta y)^2$ has a larger value, hence $\\Delta P$ has to be larger, and so we get that \"Greater convexity translates into greater price gains as interest rates fall\"\n</p>\n\n---\n\nNow suppose interest rates rise, that is $\\Delta y > 0$; then we have $-D \\Delta y < 0$, that is, the price of the bonds decreases, but the bond with greater convexity also adds a larger $\\frac12 C (\\Delta y)^2$ term, and so the price decrease will be smaller for the bond with higher convexity.\n\nThis explains \"Lessened price declines as interest rates rise\"", "_____no_output_____" ], [ "# Solution 3", "_____no_output_____" ] ], [ [ "import QuantLib as ql\n\n# function to calculate coupon value\ndef find_coupon(pv, r, m, n):\n discount_factor = (r/m) / (1 - (1 + r/m)**(-n*m))\n C = pv * discount_factor\n return C\n\n# loan settings\nloan_amt = 0.8*1000000\nrate = 0.12\npay = find_coupon(loan_amt, rate, 12, 5)\nmonth = ql.Date(15,8,2021)\nperiod = ql.Period('1m')\n\n# print settings\nprint(\"Monthly coupon is: {:.2f}\".format(pay))\nprint()\nformat_type = \"{:<15}\" * 4\nprint(format_type.format(\"Date\", \"Interest\", 
\"Principal\", \"Remaining\"))\n\n\nwhile loan_amt > 0:\n interest = loan_amt * rate / 12\n principal = pay - interest\n loan_amt = loan_amt - principal\n print(format_type.format(month.ISO(), \"{:.2f}\".format(interest), \"{:.2f}\".format(principal), \"{:.2f}\".format(loan_amt)))\n\n if round(loan_amt) == 0:\n break\n month = month + period", "Monthly coupon is: 17795.56\n\nDate Interest Principal Remaining \n2021-08-15 8000.00 9795.56 790204.44 \n2021-09-15 7902.04 9893.51 780310.93 \n2021-10-15 7803.11 9992.45 770318.48 \n2021-11-15 7703.18 10092.37 760226.11 \n2021-12-15 7602.26 10193.30 750032.81 \n2022-01-15 7500.33 10295.23 739737.58 \n2022-02-15 7397.38 10398.18 729339.40 \n2022-03-15 7293.39 10502.16 718837.23 \n2022-04-15 7188.37 10607.19 708230.05 \n2022-05-15 7082.30 10713.26 697516.79 \n2022-06-15 6975.17 10820.39 686696.40 \n2022-07-15 6866.96 10928.59 675767.80 \n2022-08-15 6757.68 11037.88 664729.92 \n2022-09-15 6647.30 11148.26 653581.67 \n2022-10-15 6535.82 11259.74 642321.92 \n2022-11-15 6423.22 11372.34 630949.58 \n2022-12-15 6309.50 11486.06 619463.52 \n2023-01-15 6194.64 11600.92 607862.60 \n2023-02-15 6078.63 11716.93 596145.67 \n2023-03-15 5961.46 11834.10 584311.57 \n2023-04-15 5843.12 11952.44 572359.12 \n2023-05-15 5723.59 12071.97 560287.16 \n2023-06-15 5602.87 12192.69 548094.47 \n2023-07-15 5480.94 12314.61 535779.86 \n2023-08-15 5357.80 12437.76 523342.10 \n2023-09-15 5233.42 12562.14 510779.96 \n2023-10-15 5107.80 12687.76 498092.20 \n2023-11-15 4980.92 12814.64 485277.57 \n2023-12-15 4852.78 12942.78 472334.78 \n2024-01-15 4723.35 13072.21 459262.57 \n2024-02-15 4592.63 13202.93 446059.64 \n2024-03-15 4460.60 13334.96 432724.68 \n2024-04-15 4327.25 13468.31 419256.37 \n2024-05-15 4192.56 13602.99 405653.37 \n2024-06-15 4056.53 13739.02 391914.35 \n2024-07-15 3919.14 13876.41 378037.93 \n2024-08-15 3780.38 14015.18 364022.75 \n2024-09-15 3640.23 14155.33 349867.42 \n2024-10-15 3498.67 14296.88 335570.54 \n2024-11-15 3355.71 14439.85 321130.69 \n2024-12-15 3211.31 14584.25 306546.44 \n2025-01-15 3065.46 14730.09 291816.34 \n2025-02-15 2918.16 14877.39 276938.95 \n2025-03-15 2769.39 15026.17 261912.78 \n2025-04-15 2619.13 15176.43 246736.35 \n2025-05-15 2467.36 15328.19 231408.15 \n2025-06-15 2314.08 15481.48 215926.68 \n2025-07-15 2159.27 15636.29 200290.39 \n2025-08-15 2002.90 15792.65 184497.73 \n2025-09-15 1844.98 15950.58 168547.15 \n2025-10-15 1685.47 16110.09 152437.06 \n2025-11-15 1524.37 16271.19 136165.88 \n2025-12-15 1361.66 16433.90 119731.98 \n2026-01-15 1197.32 16598.24 103133.74 \n2026-02-15 1031.34 16764.22 86369.52 \n2026-03-15 863.70 16931.86 69437.65 \n2026-04-15 694.38 17101.18 52336.47 \n2026-05-15 523.36 17272.19 35064.28 \n2026-06-15 350.64 17444.92 17619.36 \n2026-07-15 176.19 17619.36 0.00 \n" ] ], [ [ "### Volatility Computation for Problem 1", "_____no_output_____" ] ], [ [ "import math\n\ndef get_volatility(csv):\n data = csv.split('\\n')[1:]\n data = map(lambda x: x.split(','), data)\n closing_prices = list(map(lambda x: float(x[-2]), data))\n \n n = len(closing_prices)\n \n log_returns = []\n for i in range(1,n):\n log_returns.append(math.log(closing_prices[i]/closing_prices[i-1]))\n \n mu = sum(log_returns)/(n-1)\n \n tmp = map(lambda x: (x-mu)**2, log_returns)\n \n vol = math.sqrt(sum(tmp)/(n-1)) * math.sqrt(252)\n return vol\n\nkotak_csv = '''Date,Open,High,Low,Close,Adj 
Close,Volume\n2021-02-01,1730.000000,1810.000000,1696.250000,1801.349976,1801.349976,220763\n2021-02-02,1825.000000,1878.650024,1801.349976,1863.500000,1863.500000,337556\n2021-02-03,1875.000000,1882.349976,1820.099976,1851.849976,1851.849976,147146\n2021-02-04,1857.900024,1914.500000,1831.050049,1911.250000,1911.250000,188844\n2021-02-05,1921.000000,1997.900024,1915.000000,1982.550049,1982.550049,786773\n2021-02-08,1995.000000,2029.949951,1951.949951,1956.300049,1956.300049,212114\n2021-02-09,1950.000000,1975.000000,1938.000000,1949.199951,1949.199951,62613\n2021-02-10,1954.550049,1961.849976,1936.300049,1953.650024,1953.650024,143830\n2021-02-11,1936.000000,1984.300049,1936.000000,1961.300049,1961.300049,120121\n2021-02-12,1966.000000,1974.550049,1945.599976,1951.449951,1951.449951,86860\n2021-02-15,1954.000000,1999.000000,1954.000000,1986.199951,1986.199951,135074\n2021-02-16,1995.000000,2048.949951,1995.000000,2021.650024,2021.650024,261589\n2021-02-17,2008.500000,2022.400024,1969.500000,1989.150024,1989.150024,450365\n2021-02-18,1980.000000,1982.349976,1938.000000,1945.300049,1945.300049,193234\n2021-02-19,1945.000000,1969.599976,1925.050049,1937.300049,1937.300049,49189\n2021-02-22,1941.000000,1961.650024,1921.650024,1948.550049,1948.550049,44651\n2021-02-23,1955.000000,1961.900024,1867.000000,1873.150024,1873.150024,118138\n2021-02-24,1875.199951,1953.949951,1852.000000,1919.000000,1919.000000,454695\n2021-02-25,1935.000000,1964.949951,1886.900024,1895.349976,1895.349976,195212\n2021-02-26,1863.000000,1868.000000,1773.099976,1782.349976,1782.349976,180729'''\n\nhind_csv = '''Date,Open,High,Low,Close,Adj Close,Volume\n2021-02-01,2265.000000,2286.000000,2226.550049,2249.149902,2249.149902,130497\n2021-02-02,2271.000000,2275.000000,2207.699951,2231.850098,2231.850098,327563\n2021-02-03,2234.000000,2256.699951,2218.199951,2232.600098,2232.600098,121232\n2021-02-04,2234.000000,2258.449951,2226.949951,2247.050049,2247.050049,533609\n2021-02-05,2252.000000,2285.000000,2241.000000,2270.350098,2270.350098,254911\n2021-02-08,2275.000000,2287.000000,2233.000000,2237.800049,2237.800049,211465\n2021-02-09,2247.000000,2254.000000,2211.199951,2216.649902,2216.649902,171285\n2021-02-10,2216.649902,2240.000000,2213.449951,2235.899902,2235.899902,185915\n2021-02-11,2245.000000,2267.500000,2235.000000,2262.399902,2262.399902,121168\n2021-02-12,2270.000000,2270.649902,2232.199951,2241.899902,2241.899902,33016\n2021-02-15,2252.000000,2261.500000,2212.100098,2215.850098,2215.850098,91240\n2021-02-16,2225.000000,2228.399902,2190.500000,2196.899902,2196.899902,101652\n2021-02-17,2191.000000,2200.000000,2160.300049,2164.649902,2164.649902,138504\n2021-02-18,2165.000000,2168.449951,2143.050049,2147.750000,2147.750000,110272\n2021-02-19,2150.000000,2193.649902,2148.000000,2181.149902,2181.149902,150398\n2021-02-22,2200.000000,2201.699951,2161.100098,2167.250000,2167.250000,98782\n2021-02-23,2173.550049,2192.000000,2169.399902,2177.949951,2177.949951,22743\n2021-02-24,2179.000000,2183.949951,2104.250000,2181.600098,2181.600098,329265\n2021-02-25,2190.000000,2190.000000,2160.000000,2163.600098,2163.600098,357853\n2021-02-26,2151.149902,2182.000000,2122.000000,2132.050049,2132.050049,158925'''\n\nnestle_csv = '''Date,Open,High,Low,Close,Adj 
Close,Volume\n2021-02-01,17162.099609,17277.000000,16996.449219,17096.949219,17096.949219,3169\n2021-02-02,17211.000000,17328.099609,16800.000000,17189.349609,17189.349609,3852\n2021-02-03,17247.449219,17284.000000,17064.349609,17155.400391,17155.400391,2270\n2021-02-04,17250.000000,17250.000000,17054.800781,17073.199219,17073.199219,13193\n2021-02-05,17244.000000,17244.000000,17019.949219,17123.300781,17123.300781,2503\n2021-02-08,17199.949219,17280.000000,17107.349609,17213.550781,17213.550781,7122\n2021-02-09,17340.000000,17510.699219,17164.050781,17325.800781,17325.800781,2714\n2021-02-10,17396.900391,17439.300781,17083.800781,17167.699219,17167.699219,3341\n2021-02-11,17167.699219,17442.000000,17165.550781,17416.650391,17416.650391,2025\n2021-02-12,17449.849609,17500.000000,17241.000000,17286.099609,17286.099609,3486\n2021-02-15,17290.000000,17500.000000,17280.000000,17484.500000,17484.500000,1927\n2021-02-16,17600.000000,17634.599609,17141.250000,17222.449219,17222.449219,7901\n2021-02-17,16900.000000,16900.000000,16360.000000,16739.900391,16739.900391,28701\n2021-02-18,17050.000000,17050.000000,16307.000000,16374.150391,16374.150391,13711\n2021-02-19,16395.000000,16477.599609,16214.450195,16386.099609,16386.099609,5777\n2021-02-22,16400.000000,16531.050781,16024.599609,16099.200195,16099.200195,9051\n2021-02-23,16123.000000,16250.000000,16003.000000,16165.250000,16165.250000,6261\n2021-02-24,16249.000000,16800.000000,15900.000000,16369.950195,16369.950195,18003\n2021-02-25,16394.699219,16394.699219,16102.000000,16114.349609,16114.349609,18735\n2021-02-26,16075.000000,16287.200195,16010.000000,16097.700195,16097.700195,13733'''\n\nprint(\"Annualized Volatility of KOTAKBANK is {:.2f}%\".format(get_volatility(kotak_csv)*100))\nprint(\"Annualized Volatility of HINDUNILVR is {:.2f}%\".format(get_volatility(hind_csv)*100))\nprint(\"Annualized Volatility of NESTLEIND is {:.2f}%\".format(get_volatility(nestle_csv)*100))", "Annualized Volatility of KOTAKBANK is 38.66%\nAnnualized Volatility of HINDUNILVR is 15.16%\nAnnualized Volatility of NESTLEIND is 18.86%\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a7a0382f89436c9b934ce3e43c265c1655aeefa
729,327
ipynb
Jupyter Notebook
coverage/10-comparison_cnvnator_virus/compare_cnvnator_virus.ipynb
sequana/resources
0d68286563c3bbdf37a8eb14a2ec5bd7eaa28ec0
[ "BSD-3-Clause" ]
null
null
null
coverage/10-comparison_cnvnator_virus/compare_cnvnator_virus.ipynb
sequana/resources
0d68286563c3bbdf37a8eb14a2ec5bd7eaa28ec0
[ "BSD-3-Clause" ]
null
null
null
coverage/10-comparison_cnvnator_virus/compare_cnvnator_virus.ipynb
sequana/resources
0d68286563c3bbdf37a8eb14a2ec5bd7eaa28ec0
[ "BSD-3-Clause" ]
null
null
null
950.88266
77,824
0.951499
[ [ [ "# Sequana_coverage versus CNVnator (viral genome)\n\nThis notebook compares CNVnator, CNOGpro and sequana_coverage behaviour on a viral genome instance (same as in the virus notebook).\n\n\nVersions used:\n- sequana 0.7.0", "_____no_output_____" ] ], [ [ "%pylab inline\nmatplotlib.rcParams['figure.figsize'] = [10,7]", "Populating the interactive namespace from numpy and matplotlib\n" ] ], [ [ "Here below, we provide the results of the sequana_coverage and CNVNator analysis as files within this directory itself. Nevertheless, if you need to rerun the analysis yourself, you can still do it but you need to generate the BAM file for CNVnator (see virus notebook). For sequana_coverage, you could use the BED file directly, which is also used here below for the plotting. This BED file can simply be downloaded as follows.", "_____no_output_____" ] ], [ [ "!wget https://github.com/sequana/resources/raw/master/coverage/JB409847.bed.bz2\n!bunzip2 JB409847.bed.bz2", "--2018-07-23 22:54:23-- https://github.com/sequana/resources/raw/master/coverage/JB409847.bed.bz2\nResolving github.com (github.com)... 192.30.253.112, 192.30.253.113\nConnecting to github.com (github.com)|192.30.253.112|:443... connected.\nHTTP request sent, awaiting response... 302 Found\nLocation: https://raw.githubusercontent.com/sequana/resources/master/coverage/JB409847.bed.bz2 [following]\n--2018-07-23 22:54:24-- https://raw.githubusercontent.com/sequana/resources/master/coverage/JB409847.bed.bz2\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.120.133\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.120.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 63485 (62K) [application/octet-stream]\nSaving to: ‘JB409847.bed.bz2’\n\nJB409847.bed.bz2 100%[===================>] 62.00K --.-KB/s in 0.02s \n\n2018-07-23 22:54:24 (3.91 MB/s) - ‘JB409847.bed.bz2’ saved [63485/63485]\n\n" ] ], [ [ "# The coverage signal", "_____no_output_____" ], [ "Let us have a quick look at the coverage itself.", "_____no_output_____" ] ], [ [ "from sequana import GenomeCov\nb = GenomeCov(\"JB409847.bed\")\nchromosome = b.chr_list[0]\nchromosome.run(4001, 2, circular=True)", "_____no_output_____" ], [ "chromosome.plot_coverage()\n_ = ylim([0,1500])", "_____no_output_____" ] ], [ [ "What you see are two long deleted regions of about 700 bases (one fully deleted) and two 3-bases deleted regions on the right hand side. Our goal is to detect those 4 events automatically.", "_____no_output_____" ], [ "# Get ROIs using sequana_coverage", "_____no_output_____" ], [ "We ran the analysis with circular chromosome set on and a window parameter of\n- 1000\n- 2000\n- 3000\n- 4000\n\nFor example:\n```\nsequana_coverage -o -input JB409847.bed --no-html --no-multiqc -w 3000\n```\n\nResults are available in 4 files for your convenience: e.g. rois_1000.csv, rois_4000.csv\n\nNote that the window size cannot be larger than a fifth of the genome length from the command line. Would you need to force it, you would need to use the library itself.", "_____no_output_____" ], [ "# Get ROIs using CNVnator", "_____no_output_____" ], [ "```\ncnvnator -root out1.root -tree JB409847.bed \ncnvnator -root out1.root -his 1\ncnvnator -root out1.root -stat 1\ncnvnator -root out1.root -partition 1 -ngc\ncnvnator -root out1.root -call 1 -ngc > events_bin1.txt\n```", "_____no_output_____" ], [ "The default parameter of 100 gives no detection. 
We then used bin=1, 5, 10, 20 and stored the results in:\n\n- events_bin1.txt\n- events_bin5.txt\n- events_bin10.txt\n- events_bin20.txt", "_____no_output_____" ], [ "# How are events detected using CNVnator?", "_____no_output_____" ] ], [ [ "from sequana.cnv import CNVnator", "_____no_output_____" ], [ "cnv1 = CNVnator(\"events_bin1.txt\").df\ncnv5 = CNVnator(\"events_bin5.txt\").df\ncnv10 = CNVnator(\"events_bin10.txt\").df\ncnv20 = CNVnator(\"events_bin20.txt\").df", "_____no_output_____" ], [ "def plot_rois_cnv(cnv):\n chromosome.plot_coverage()\n for _, this in cnv.iterrows():\n type_ = this['type']\n positions = [this.start, this.end]\n if type_ == \"deletion\":\n fill_between(positions, 0, 1000)\n else:\n fill_between(positions, 1000,2000)\n ylim([0,2000]) ", "_____no_output_____" ] ], [ [ "## binning of 20 and 10", "_____no_output_____" ] ], [ [ "plot_rois_cnv(cnv10)", "_____no_output_____" ] ], [ [ "Here, the two main events on the left hand side (deleted regions of several hundreds of bases) are detected. However, the short deleted regions on the right hand side (a few bases) are not. ", "_____no_output_____" ], [ "\n\n# Binning of 5", "_____no_output_____" ] ], [ [ "plot_rois_cnv(cnv5)", "_____no_output_____" ] ], [ [ "Here again, the two main deleted regions are detected but the two short events\nare not. There are also false detections at position 0 and around 4000.", "_____no_output_____" ], [ "# Binning of 1", "_____no_output_____" ] ], [ [ "plot_rois_cnv(cnv1)", "_____no_output_____" ] ], [ [ "A binning of 1 is too small: the long deleted regions are detected but the small \n deleted regions are not, and there are lots of higher-coverage regions classified as duplicated CNVs.", "_____no_output_____" ], [ "# How are events detected using sequana_coverage?", "_____no_output_____" ] ], [ [ "# W can be 1000,2000,3000,4000\nimport pandas as pd\ndef plot_rois_sequana(W):\n assert W in [2000, 3000, 4000, 1000,5000, \"4000_t3\"]\n if W == \"4000_t3\":\n chromosome.run(4001, circular=True)\n else:\n chromosome.run(W+1, circular=True)\n chromosome.plot_coverage(sample=False)\n rois = pd.read_csv(\"rois_{}.csv\".format(W))\n for _, this in rois.iterrows():\n #if this.max_zscore >-4.5 and this.max_zscore<4.5 and this.max_cov!=0:\n # continue \n positions = [this.start, this.end]\n \n if abs(this.mean_zscore) >12: col=\"red\"\n elif abs(this.mean_zscore) >8: col=\"orange\"\n elif abs(this.mean_zscore) >4: col=\"yellow\"\n \n if this.mean_zscore > 0:\n fill_between(positions, 1000, 2000, color=col) \n else:\n fill_between(positions, 0, 1000, color=col)\n if this.end-this.start < 100:\n axvline(this.start, ymax=0.5, lw=3, color=\"r\")\n ylim([0,2000])", "_____no_output_____" ], [ "plot_rois_sequana(5000)", "_____no_output_____" ], [ "plot_rois_sequana(4000)", "_____no_output_____" ], [ "plot_rois_sequana(3000)", "_____no_output_____" ] ], [ [ "Using W=2000, 3000, 4000 the results are robust and consistent: the same ROIs are detected. In particular, the 2 long and 2 short deleted regions are detected. \n \nIn addition, some short events not fully deleted are also systematically detected. The detection seems coherent visually except maybe for the duplicated events at position 5000, which could be considered a false detection. 
Note, however, that the zscore associated with these events could be used to discard them (mean zscore of 5)", "_____no_output_____" ] ], [ [ "rois = pd.read_csv(\"rois_4000.csv\")\nrois[[\"start\", \"end\", \"size\", \"mean_rm\", \"max_zscore\", \"log2_ratio\"]]", "_____no_output_____" ] ], [ [ "Now, if we decrease the W parameter to 1000, we may miss the large deleted regions, \nwhose lengths are 711 and 782 (to detect those events we recommend using a W of\ntwice this length, so about 1500)", "_____no_output_____" ] ], [ [ "plot_rois_sequana(1000)", "_____no_output_____" ] ], [ [ "So, here we still detect the two deleted regions but we see that the running \nmedian fits the data too closely. Consequently, we have some \nfalse detections at positions 0, 6600 and 17923, to cite a few examples. ", "_____no_output_____" ], [ "# CNOGpro detection", "_____no_output_____" ], [ "Running CNOGpro manually using window lengths of 100 and 10, we get these \nresults", "_____no_output_____" ] ], [ [ "CNOGpro_10 = [[3521, 4220, 0], [4221, 4230, 5], \n               [4241,4250,0], [4251,4260,3],\n              [5681,5740,0], [19771,19795,0]]\nCNOGpro_100 = [[3601,4200,0], [4201,4300,3], [4301,4400,2]]", "_____no_output_____" ], [ "plot_rois_sequana(4000)\nfor this in CNOGpro_10:\n    plot(this[0:2], [1000, 1000], \"ob-\")", "_____no_output_____" ], [ "plot_rois_sequana(4000)\nfor this in CNOGpro_100:\n    axhline(1000, this[0], 1000, color=\"r\", lw=20)\n    plot(this[0:2], [1000, 1000], \"ob-\")", "_____no_output_____" ] ], [ [ "# Conclusions", "_____no_output_____" ], [ "On a viral genome (length 18000), we used sequana_coverage and CNVnator to detect events of interest that can be seen by eye in the coverage signal, namely one fully deleted region of about 700 bases, one depleted region of about 700 bases, and two short deleted regions of a few bases.\n\nSequana_coverage detects those 4 events with the default parameter (window length set to a fifth of the full genome length). It is also robust with respect to this parameter (2000, 3000 and 4000 give the same results). Note, however, that a window of 1000 is too short and leads to possible false detections. Those false detections can easily be removed using the mean zscore associated with these events.\n\nCNVnator detects the two long deleted events. However, the two short deleted ones are systematically missed, which is not surprising since CNVnator is designed to detect long regions. Depending on the bin parameter, there are a few false detections. Decreasing to a bin of 1, the results are difficult to assess since most of the genome is classified as a mix of regions of interest.\n\nCNOGpro detects the two long deleted events. However, their exact extents are not correct. Short events are missed using bins of 5, 10, 100, 200.\n\n**Computation time:**\n\n- sequana_coverage (with the --no-html and --no-multiqc options) takes 2.5 seconds irrespective of the window parameter\n- cnvnator: 8.5 s for bin=5 or 10, 18 s for bin=1", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ] ]
4a7a0cd440f7ebf956d33180bdaed011ef11a7e3
126,777
ipynb
Jupyter Notebook
RL_A2C_2N_TF2.ipynb
gcfer/reinforcement-learning
91f4f1fc48cdb427709f92c1e1ea87a4862cf297
[ "MIT" ]
null
null
null
RL_A2C_2N_TF2.ipynb
gcfer/reinforcement-learning
91f4f1fc48cdb427709f92c1e1ea87a4862cf297
[ "MIT" ]
null
null
null
RL_A2C_2N_TF2.ipynb
gcfer/reinforcement-learning
91f4f1fc48cdb427709f92c1e1ea87a4862cf297
[ "MIT" ]
null
null
null
147.073086
64,074
0.861387
[ [ [ "<a href=\"https://colab.research.google.com/github/gcfer/reinforcement-learning/blob/main/RL_A2C_2N_TF2.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Reinforcement Learning: A2C (Actor-Critic Method) — Two Networks", "_____no_output_____" ], [ "## Overview", "_____no_output_____" ], [ "In this notebook, we'll cover the actor-critic framework and implement `A2C`, a state-of-the-art actor-critic algorithm. We'll test it by solving the cartpole problem in the Open AI gym. ", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport datetime\n\n# Import tensorflow\n#!pip install tensorflow-gpu==1.14.0 > /dev/null 2>&1\nimport tensorflow as tf\nimport tensorflow.keras as K\nprint(tf.__version__)\n\n# Check that tf sees the GPU\ndevice_name = tf.test.gpu_device_name()\nprint(device_name)\n\n# Import libraries for plotting\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nplt.style.use('seaborn-pastel')\n\n%matplotlib inline\n%config InlineBackend.figure_format = 'retina' # this makes plot in high res", "2.3.0\n/device:GPU:0\n" ] ], [ [ "Since we are in a remote notebook, we cannot display the progress of the environment in real time. Instead, we store the renderings and show a video at the end of the episode (refer to [this](https://star-ai.github.io/Rendering-OpenAi-Gym-in-Colaboratory/) guide in case you need it). The only advice that I can give is to import `gym` _after_ the update below.", "_____no_output_____" ] ], [ [ "#remove \" > /dev/null 2>&1\" to see what is going on under the hood\n!pip install gym pyvirtualdisplay > /dev/null 2>&1\n!apt-get install -y xvfb python-opengl ffmpeg > /dev/null 2>&1", "_____no_output_____" ], [ "# Maybe\n# !apt-get update > /dev/null 2>&1\n# !apt-get install cmake > /dev/null 2>&1\n# !pip install --upgrade setuptools 2>&1\n# !pip install ez_setup > /dev/null 2>&1\n# !pip install gym[atari] > /dev/null 2>&1", "_____no_output_____" ], [ "# Open AI gym\nimport gym \nfrom gym import logger as gymlogger\nfrom gym.wrappers import Monitor\ngymlogger.set_level(40) #error only\nimport math\nimport random\nimport glob\nimport io\nimport base64\nfrom IPython.display import HTML\nfrom IPython import display as ipythondisplay", "_____no_output_____" ], [ "from pyvirtualdisplay import Display\ndisplay = Display(visible=0, size=(2880, 1800))\ndisplay.start()", "_____no_output_____" ] ], [ [ "The function below is needed to display the video. 
I slightly modified it from the original one (that you can find in the guide I linked above) to avoid the infinite repetition loop of the video.", "_____no_output_____" ] ], [ [ "\"\"\"\nUtility functions to enable video recording of the gym environment and displaying it.\nTo enable video, just do \"env = wrap_env(env)\"\n\"\"\"\n\ndef show_video():\n  mp4list = glob.glob('video/*.mp4')\n  if len(mp4list) > 0:\n    mp4 = mp4list[0]\n    video = io.open(mp4, 'r+b').read()\n    encoded = base64.b64encode(video)\n    ipythondisplay.display(HTML(data='''<video alt=\"test\" autoplay \n                loop controls style=\"height: 400px;\">\n                <source src=\"data:video/mp4;base64,{0}\" type=\"video/mp4\" />\n             </video>'''.format(encoded.decode('ascii'))))\n  else: \n    print(\"Could not find video\")\n    \n\ndef wrap_env(env):\n  env = Monitor(env, './video', force=True)\n  return env", "_____no_output_____" ] ], [ [ "## OpenAI Gym Cartpole", "_____no_output_____" ], [ "The Cartpole problem is a discrete control problem where we try to keep the pole vertical by moving the cart below it. \n\nUpon loading the environment, we launch a simulation where the agent chooses the next action at random from the action space. Finally, we show the video of the result. The problem is considered unsolved (= game over) if the angle between the pole and the line orthogonal to the cart axis becomes larger than a threshold. The parameter `done` specifies when the experiment is over.", "_____no_output_____" ] ], [ [ "# Load the environment and start\nenv = wrap_env(gym.make(\"CartPole-v0\"))", "_____no_output_____" ], [ "observation = env.reset()\n\nwhile True:\n    \n    env.render()\n    \n    action = env.action_space.sample() \n    \n    observation, reward, done, info = env.step(action) \n    \n    if done: \n        break\n    \nenv.close()", "_____no_output_____" ], [ "show_video()", "_____no_output_____" ] ], [ [ "To better understand the inputs and outputs of the environment, let us consider the action space and the observation space.", "_____no_output_____" ], [ "The action space is of type `Discrete(n)` where `n` is the number of actions. This is equivalent to the discrete set $\\{ 0, 1, 2, \\dotsc, n-1 \\}$.", "_____no_output_____" ] ], [ [ "env.action_space", "_____no_output_____" ] ], [ [ "The observation space is of type `Box(n)`, which means that it is the Cartesian product of `n` intervals.", "_____no_output_____" ] ], [ [ "env.observation_space", "_____no_output_____" ], [ "[env.observation_space.low, env.observation_space.high]", "_____no_output_____" ] ], [ [ "When we make a step in the environment, the feedback that we get includes the observation:", "_____no_output_____" ] ], [ [ "#env = gym.make('CartPole-v0')\n# env = Monitor(env, './video', force=True)\nenv.reset()\n\nobs, r, done, _ = env.step(0)\nprint(obs)", "[ 0.00978377 -0.2103974 -0.0444823 0.26758958]\n" ] ], [ [ "## Actor-Critic Algorithms\n\nActor-critic algorithms are policy gradient algorithms where the actor is driven by the value- or Q-function estimated by the critic. The role of the actor is to find the policy according to the policy gradient method. The role of the critic is to discover the value- or Q-function, and feed back information about “how good” the action taken in the current state was. \n\nIn policy gradient algorithms, the policy is updated on the basis of the discounted reward that is experienced. 
Let us recall that the policy $\\pi_\\theta$ is fit such that \n$$ \\theta^* = \\arg\\max_\\theta \\mathbf{E}\\!\\left[R(\\tau)\\sum_{t=0}^T \\log \\pi_\\theta(a_t|s_t)\\right] $$\nwhere\n$$ R(\\tau) = \\sum_{t=0}^T \\gamma^t r(s_t,a_t) $$\nand $\\tau=(s_0, a_0, r_0, \\cdots, s_{T}, a_{T}, r_{T})$.\n\nIn practice, we fit the network so as to minimize the loss\n\n$$ L(\\tau) = - \\sum_{t=0}^T R(\\tau_{t}) \\log (\\pi_\\theta(a_t)), $$\n\nwhere $\\tau_t=(s_t, a_t, r_t, \\cdots, s_{T}, a_{T}, r_{T})$, at the end of each episode, and repeat the process over many episodes. We also know that, instead of using $R$, we can use a modified pre-log factor so as to reduce the variance in the learning process. This is accomplished through baselines, that is, some function $b(s_t)$ that is subtracted from $R$:\n\n$$ L(\\tau) = - \\sum_{t=0}^T [R(\\tau_{t})-b(s_t)] \\log (\\pi_\\theta(a_t)). $$\n\nThe fundamental problem with this approach is that the actor just tries actions and waits to see the reward at the end of the episode. This is equivalent to performing a task and learning, by trial and error, what to do better next time. \n\nIt would be much better if, while performing the task, we could receive feedback. The critic network is introduced to do exactly that. How does the critic perform its critique, though? It implements a DQN to find the Q-function or, in a simpler version, the value function. \n\nIn other words, this is equivalent to having a teacher, but the teacher is learning while teaching. It is intuitive then that the critic needs to learn sooner than the actor how to perform its task, and so we'll set the learning rate of the critic larger than that of the actor.\n\nWhat the critic does is infer the value function, which is $V(s_t)=\\mathbf{E}[R(s_t)]$ under the current policy. Suppose the critic converges and learns it. Then we can set the value function as the baseline, $b(s):=V(s)$. The resulting pre-log factor,\n\n$$ L(\\tau) = - \\sum_{t=0}^T A(\\tau_{t}) \\log (\\pi_\\theta(a_t)), $$\n\nis called the advantage. 
This is why `A2C` is called advantage actor critic.\n\n\n", "_____no_output_____" ] ], [ [ "# A2C - Two networks\nclass A2C_2N:\n \n def __init__(self, state_size, action_size, gamma=None, max_steps=None):\n # max_steps is the maximum number of batches [s, a, r, s_] or epochs remembered\n \n # Parameters\n self.state_size = state_size\n self.action_size = action_size\n \n self.memory = list()\n if gamma is None: \n self.gamma = 0.99\n else: \n self.gamma = gamma\n \n if max_steps is None:\n self.max_steps = 200 \n else:\n self.max_steps = max_steps\n\n # learning rates\n self.actor_lr = 0.0008\n self.critic_lr = 0.0025\n\n # actor network\n self.actor = self.build_actor()\n\n # critic network\n self.critic = self.build_critic()\n\n def remember(self, s, a, r, s_, done):\n self.memory.append([s, a, r, s_, done])\n if len(self.memory) > self.max_steps: # if too long\n self.memory.pop(0) # forget the oldest\n \n def forget(self):\n self.memory = list()\n\n # actor learns the policy: input is state; output is distribution over actions (policy)\n def build_actor(self, n_hidden_1=None, n_hidden_2=None):\n \n if n_hidden_1 == None:\n n_hidden_1 = 6 * self.state_size\n \n if n_hidden_2 == None:\n n_hidden_2 = 6 * self.state_size\n\n model = K.Sequential()\n model.add(K.layers.Dense(n_hidden_1, activation=tf.nn.elu, input_dim=self.state_size)) # first hidden layer\n model.add(K.layers.Dense(n_hidden_2, activation=tf.nn.elu))\n model.add(K.layers.Dense(self.action_size, activation='softmax')) # output\n \n # loss is categorical_crossentropy since pi_theta (vector) should be equal to one-hot action (vector) eventually\n # because there is always a best action to be taken\n model.compile(optimizer=K.optimizers.RMSprop(lr=self.actor_lr), loss='categorical_crossentropy') \n \n return model\n\n # critic network\n def build_critic(self, n_hidden_1=None, n_hidden_2=None):\n \n if n_hidden_1 == None:\n n_hidden_1 = 6 * self.state_size\n \n if n_hidden_2 == None:\n n_hidden_2 = 6 * self.state_size\n\n model = K.Sequential()\n model.add(K.layers.Dense(n_hidden_1, activation=tf.nn.elu, input_dim=self.state_size)) # first hidden layer\n model.add(K.layers.Dense(n_hidden_2, activation=tf.nn.elu))\n model.add(K.layers.Dense(1, activation=tf.nn.elu)) # output\n \n model.compile(optimizer=K.optimizers.Adam(lr=self.critic_lr), loss='mse') \n \n return model\n\n # actor implements policy gradient\n def policy(self, s):\n policy = self.actor.predict(s, batch_size=1).flatten()\n a = np.random.choice(self.action_size, 1, p=policy)[0]\n return a\n\n # learn from memory\n def learn(self):\n\n # replay the entire episode\n s, a, r, s_, done = zip(*self.memory)\n a = np.reshape(a, (-1, 1))\n \n T = a.shape[0] # epochs in memory\n a_one_hot = np.zeros((T, self.action_size))\n a_one_hot[np.arange(T), a.reshape(-1)] = 1 # size: T x action_size\n s = np.concatenate(s) # or np.vstack(s) \n target_actor = a_one_hot # actions\n \n cum_reward = np.cumsum((self.gamma ** np.arange(0, T)) * r)/(self.gamma ** np.arange(0, T))\n R = np.flip(cum_reward).reshape(-1, 1)\n v = self.critic.predict(s)\n A = R - v # theoretical advantage (infinite-horizon problems)\n # s_ = np.concatenate(s_)\n # v_ = self.critic.predict(s_)\n # r = np.reshape(r, (-1, 1))\n # A = r + self.gamma * v_ - v # advantage (same as above but works better in finite-horizon problems)\n\n self.actor.fit(s, target_actor, sample_weight=A, epochs=1, verbose=0) # uses advantages\n self.critic.fit(s, R, epochs=1, verbose=0) # trained to get the value function ", 
"_____no_output_____" ] ], [ [ "## Training\n\n", "_____no_output_____" ] ], [ [ "seed = 0\nnp.random.seed(seed)\ntf.random.set_seed(seed)\n\n# Restart environment\n# env = Monitor(env, './video', force=True)\nMAX_REWARD = 200\nenv._max_episode_steps = MAX_REWARD\n\n# Parameters\nn_episodes = 350\nwinning_streak = 10 # after this number of successive successes, training stops\nreward_history = np.zeros(n_episodes)\ngamma = 0.99\nsteps_in_memory = 200 # number of steps to remember\n\nA = np.arange(env.action_space.n)\ndim_state_space = env.observation_space.shape[0]\n\n# Start training\nagent = A2C_2N(dim_state_space, env.action_space.n, gamma, steps_in_memory)\n\n# init\ns = env.reset()\ns = np.reshape(s, [1, dim_state_space])\n\ntemplate = \"\\rEpisode: {:3d}/{:3d} | Reward: {:3.0f} | Duration: {:.2f} s\"", "_____no_output_____" ], [ "for e in range(n_episodes):\n start_time = datetime.datetime.now()\n\n s = env.reset()\n s = np.reshape(s, [1, dim_state_space])\n \n done = False\n cum_reward = 0\n \n while not done:\n a = agent.policy(s)\n s_, r, done, _ = env.step(a)\n s_ = np.reshape(s_, [1, dim_state_space])\n agent.remember(s, a, r, s_, done)\n cum_reward += r\n s = s_\n \n agent.learn()\n agent.forget()\n \n dt = datetime.datetime.now() - start_time\n print(template.format(e+1, n_episodes, cum_reward, dt.total_seconds()), end='')\n \n reward_history[e] = cum_reward", "Episode: 350/350 | Reward: 200 | Duration: 6.82 s" ], [ "plt.plot(reward_history[0:e], label='Reward')\nplt.xlabel('Episodes')\nplt.ylabel('Cumulative reward')\nplt.tight_layout()\nplt.show()", "_____no_output_____" ] ], [ [ "## Trying it", "_____no_output_____" ] ], [ [ "env = wrap_env(gym.make(\"CartPole-v0\"))\ns = env.reset()\ns = np.reshape(s, [1, dim_state_space])\n\ndone = False\ncum_reward = 0\n\nwhile not done:\n\n env.render()\n a = agent.policy(s)\n s_, r, done, _ = env.step(a)\n s_ = np.reshape(s_, [1, dim_state_space])\n agent.remember(s, a, r, s_, done)\n cum_reward += r\n s = s_\n\nenv.close()\n\nprint('We got a reward equal to {:.0f}'.format(cum_reward))", "We got a reward equal to 200\n" ], [ "show_video()", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
4a7a153991f2cbdab645d70796b8be9dbffb4d0b
3,787
ipynb
Jupyter Notebook
books/Python-for-Data-Analysis/02_2.ipynb
topin27/notes
fb0793347a44bc95fddb9ce41ea5a7cfac6dfe47
[ "Apache-2.0" ]
null
null
null
books/Python-for-Data-Analysis/02_2.ipynb
topin27/notes
fb0793347a44bc95fddb9ce41ea5a7cfac6dfe47
[ "Apache-2.0" ]
null
null
null
books/Python-for-Data-Analysis/02_2.ipynb
topin27/notes
fb0793347a44bc95fddb9ce41ea5a7cfac6dfe47
[ "Apache-2.0" ]
null
null
null
21.39548
111
0.543702
[ [ [ "%matplotlib inline\nimport pandas as pd\nimport numpy as np", "_____no_output_____" ], [ "unames = ['user_id', 'gender', 'age', 'occupation', 'zip']\nusers = pd.read_table('pydata-book/ch02/movielens/users.dat', sep='::', header=None, names=unames)\n\nrnames = ['user_id', 'movie_id', 'rating', 'timestamp']\nratings = pd.read_table('pydata-book/ch02/movielens/ratings.dat', sep='::', header=None, names=rnames)\n\nmnames = ['movie_id', 'title', 'genres']\nmovies = pd.read_table('pydata-book/ch02/movielens/movies.dat', sep='::', header=None, names=mnames)", "_____no_output_____" ], [ "print users[:5]\nprint ratings[:5]\nmovies[:5]", "_____no_output_____" ], [ "data = pd.merge(pd.merge(ratings, users), movies)\ndata", "_____no_output_____" ], [ "mean_ratings = data.pivot_table('rating', index='title', columns='gender', aggfunc='mean')\nmean_ratings[:5]", "_____no_output_____" ], [ "ratings_by_title = data.groupby('title').size()\nratings_by_title[:10]", "_____no_output_____" ], [ "active_titles = ratings_by_title.index[ratings_by_title >= 250]\nactive_titles", "_____no_output_____" ], [ "?pd.DataFrame.pivot_table", "_____no_output_____" ], [ "mean_ratings = mean_ratings.ix[active_titles]\nmean_ratings", "_____no_output_____" ], [ "top_female_ratings = mean_ratings.sort_index(by='F', ascending=False)\ntop_female_ratings[:10]", "_____no_output_____" ], [ "mean_ratings['diff'] = mean_ratings['M'] - mean_ratings['F']\nsorted_by_diff = mean_ratings.sort_index(by='diff')\nsorted_by_diff[:15]", "_____no_output_____" ], [ "sorted_by_diff[::-1][:15]", "_____no_output_____" ], [ "rating_std_by_title = data.groupby('title')['rating'].std()\nrating_std_by_title = ratings_by_title.ix[active_titles]\nrating_std_by_title.reindex", "_____no_output_____" ], [ "?pd.Series", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a7a20d8adc5393338580e797bf7c6e3b2331c72
207,118
ipynb
Jupyter Notebook
docs/examples/driver_examples/Qcodes example with Lakeshore 325.ipynb
jakeogh/Qcodes
3042317038e89264d481b212c9640c4d6b356c88
[ "MIT" ]
223
2016-10-29T15:00:24.000Z
2022-03-20T06:53:34.000Z
docs/examples/driver_examples/Qcodes example with Lakeshore 325.ipynb
jakeogh/Qcodes
3042317038e89264d481b212c9640c4d6b356c88
[ "MIT" ]
3,406
2016-10-25T10:44:50.000Z
2022-03-31T09:47:35.000Z
docs/examples/driver_examples/Qcodes example with Lakeshore 325.ipynb
Akshita07/Qcodes
f75e4786e268f415935aa4658d92526279c7a102
[ "MIT" ]
263
2016-10-25T11:35:36.000Z
2022-03-31T08:53:20.000Z
55.290443
20,067
0.605539
[ [ [ "# QCoDeS Example with Lakeshore 325\n\nHere provided is an example session with model 325 of the Lakeshore temperature controller ", "_____no_output_____" ] ], [ [ "%matplotlib notebook", "_____no_output_____" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom qcodes.instrument_drivers.Lakeshore.Model_325 import Model_325", "_____no_output_____" ], [ "lake = Model_325(\"lake\", \"GPIB0::12::INSTR\")", "Connected to: LSCI 325 (serial:LSA2251, firmware:1.8/1.1) in 1.30s\n" ] ], [ [ "## Sensor commands", "_____no_output_____" ] ], [ [ "# Check that the sensor is in the correct status \nlake.sensor_A.status()", "_____no_output_____" ], [ "# What temperature is it reading? \nlake.sensor_A.temperature()", "_____no_output_____" ], [ "lake.sensor_A.temperature.unit", "_____no_output_____" ], [ "# We can access the sensor objects through the sensor list as well\nassert lake.sensor_A is lake.sensor[0]", "_____no_output_____" ] ], [ [ "## Heater commands ", "_____no_output_____" ] ], [ [ "# In a closed loop configuration, heater 1 reads from... \nlake.heater_1.input_channel()", "_____no_output_____" ], [ "lake.heater_1.unit()", "_____no_output_____" ], [ "# Get the PID values \nprint(\"P = \", lake.heater_1.P())\nprint(\"I = \", lake.heater_1.I())\nprint(\"D = \", lake.heater_1.D())", "P = 400.0\nI = 40.0\nD = 10.0\n" ], [ "# Is the heater on? \nlake.heater_1.output_range()", "_____no_output_____" ] ], [ [ "## Loading and updating sensor calibration values", "_____no_output_____" ] ], [ [ "curve = lake.sensor_A.curve", "_____no_output_____" ], [ "curve_data = curve.get_data()", "_____no_output_____" ], [ "curve_data.keys()", "_____no_output_____" ], [ "fig, ax = plt.subplots()\nax.plot(curve_data[\"Temperature (K)\"], curve_data['log Ohm'], '.')\nplt.show()", "_____no_output_____" ], [ "curve.curve_name()", "_____no_output_____" ], [ "curve_x = lake.curve[23]", "_____no_output_____" ], [ "curve_x_data = curve_x.get_data()", "_____no_output_____" ], [ "curve_x_data.keys()", "_____no_output_____" ], [ "temp = np.linspace(0, 100, 200)\nnew_data = {\"Temperature (K)\": temp, \"log Ohm\": 1/(temp+1)+2}\n\nfig, ax = plt.subplots()\nax.plot(new_data[\"Temperature (K)\"], new_data[\"log Ohm\"], '.')\nplt.show()", "_____no_output_____" ], [ "curve_x.format(\"log Ohm/K\")\ncurve_x.set_data(new_data)", "_____no_output_____" ], [ "curve_x.format()", "_____no_output_____" ], [ "curve_x_data = curve_x.get_data()", "_____no_output_____" ], [ "fig, ax = plt.subplots()\nax.plot(curve_x_data[\"Temperature (K)\"], curve_x_data['log Ohm'], '.')\nplt.show()", "_____no_output_____" ] ], [ [ "## Go to a set point", "_____no_output_____" ] ], [ [ "import time\nimport numpy\nfrom IPython.display import display\nfrom ipywidgets import interact, widgets\nfrom matplotlib import pyplot as plt\n\ndef live_plot_temperature_reading(channel_to_read, read_period=0.2, n_reads=1000):\n \"\"\"\n Live plot the temperature reading from a Lakeshore sensor channel\n \n Args:\n channel_to_read\n Lakeshore channel object to read the temperature from\n read_period\n time in seconds between two reads of the temperature\n n_reads\n total number of reads to perform\n \"\"\"\n\n # Make a widget for a text display that is contantly being updated\n text = widgets.Text()\n display(text)\n\n fig, ax = plt.subplots(1)\n line, = ax.plot([], [], '*-')\n ax.set_xlabel('Time, s')\n ax.set_ylabel(f'Temperature, {channel_to_read.temperature.unit}')\n fig.show()\n plt.ion()\n\n for i in range(n_reads):\n time.sleep(read_period)\n\n # 
Update the text field\n text.value = f'T = {channel_to_read.temperature()}'\n\n # Add new point to the data that is being plotted\n line.set_ydata(numpy.append(line.get_ydata(), channel_to_read.temperature()))\n line.set_xdata(numpy.arange(0, len(line.get_ydata()), 1)*read_period)\n\n ax.relim() # Recalculate limits\n ax.autoscale_view(True, True, True) # Autoscale\n fig.canvas.draw() # Redraw", "_____no_output_____" ], [ "lake.heater_1.control_mode(\"Manual PID\")\nlake.heater_1.output_range(\"Low (2.5W)\")\nlake.heater_1.input_channel(\"A\")\n# The following seem to be good settings for our setup\nlake.heater_1.P(400)\nlake.heater_1.I(40)\nlake.heater_1.D(10)\n\n\nlake.heater_1.setpoint(15.0) # <- temperature \nlive_plot_temperature_reading(lake.sensor_a, n_reads=400)", "_____no_output_____" ] ], [ [ "## Querying the resistance and heater output ", "_____no_output_____" ] ], [ [ "# to get the resistance of the system (25 or 50 Ohm)\nlake.heater_1.resistance()", "_____no_output_____" ], [ "# to set the resistance of the system (25 or 50 Ohm)\nlake.heater_1.resistance(50)\nlake.heater_1.resistance()", "_____no_output_____" ], [ "# output in percent (%) of current or power, depending on setting, which can be queried by lake.heater_1.output_metric()\nlake.heater_1.heater_output() # in %, 50 means 50%", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
4a7a2e84c35259c8323fcf96a975bcc8ec3765df
7,056
ipynb
Jupyter Notebook
Exercise4-Happy-or-Sad-Dataset-Question.ipynb
snalahi/Introduction-to-TensorFlow-for-Artificial-Intelligence-Machine-Learning-and-Deep-Learning
390b113b03c3d1c15366a281a8a0078d9ccef326
[ "MIT" ]
1
2021-05-29T21:04:01.000Z
2021-05-29T21:04:01.000Z
Exercise4-Happy-or-Sad-Dataset-Question.ipynb
snalahi/Introduction-to-TensorFlow-for-Artificial-Intelligence-Machine-Learning-and-Deep-Learning
390b113b03c3d1c15366a281a8a0078d9ccef326
[ "MIT" ]
null
null
null
Exercise4-Happy-or-Sad-Dataset-Question.ipynb
snalahi/Introduction-to-TensorFlow-for-Artificial-Intelligence-Machine-Learning-and-Deep-Learning
390b113b03c3d1c15366a281a8a0078d9ccef326
[ "MIT" ]
null
null
null
32.971963
156
0.525794
[ [ [ "Below is code with a link to a happy or sad dataset which contains 80 images, 40 happy and 40 sad. \nCreate a convolutional neural network that trains to 100% accuracy on these images, which cancels training upon hitting training accuracy of >.999\n\nHint -- it will work best with 3 convolutional layers.", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nimport os\nimport zipfile\nfrom os import path, getcwd, chdir\n\n# DO NOT CHANGE THE LINE BELOW. If you are developing in a local\n# environment, then grab happy-or-sad.zip from the Coursera Jupyter Notebook\n# and place it inside a local folder and edit the path to that location\npath = f\"{getcwd()}/../tmp2/happy-or-sad.zip\"\n\nzip_ref = zipfile.ZipFile(path, 'r')\nzip_ref.extractall(\"/tmp/h-or-s\")\nzip_ref.close()", "_____no_output_____" ], [ "# GRADED FUNCTION: train_happy_sad_model\ndef train_happy_sad_model():\n # Please write your code only where you are indicated.\n # please do not remove # model fitting inline comments.\n\n DESIRED_ACCURACY = 0.999\n\n class myCallback(tf.keras.callbacks.Callback):\n def on_epoch_end(self, callback, logs={}):\n if (logs.get('acc') > DESIRED_ACCURACY):\n print(\"\\nReached {}% accuracy, stopping training\".format(DESIRED_ACCURACY*100))\n self.model.stop_training = True\n\n callbacks = myCallback()\n \n # This Code Block should Define and Compile the Model. Please assume the images are 150 X 150 in your implementation.\n model = tf.keras.models.Sequential([\n # Your Code Here\n tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(150,150,3)),\n tf.keras.layers.MaxPooling2D(2, 2),\n tf.keras.layers.Conv2D(16, (3,3), activation='relu'),\n tf.keras.layers.MaxPooling2D(2, 2),\n tf.keras.layers.Conv2D(16, (3,3), activation='relu'),\n tf.keras.layers.MaxPooling2D(2, 2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(512, activation='relu'),\n tf.keras.layers.Dense(1, activation='sigmoid')\n ])\n\n from tensorflow.keras.optimizers import RMSprop\n\n model.compile(loss='binary_crossentropy', optimizer=RMSprop(lr=0.001), metrics=['acc'])\n \n\n # This code block should create an instance of an ImageDataGenerator called train_datagen \n # And a train_generator by calling train_datagen.flow_from_directory\n\n from tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n train_datagen = ImageDataGenerator(rescale=1./255)\n\n # Please use a target_size of 150 X 150.\n train_generator = train_datagen.flow_from_directory(\n '/tmp/h-or-s',\n target_size=(150, 150),\n batch_size=10,\n class_mode='binary'\n )\n # Expected output: 'Found 80 images belonging to 2 classes'\n\n # This code block should call model.fit_generator and train for\n # a number of epochs.\n # model fitting\n history = model.fit_generator(\n train_generator,\n steps_per_epoch=8,\n epochs=15,\n verbose=1,\n callbacks=[callbacks]\n )\n # model fitting\n return history.history['acc'][-1]", "_____no_output_____" ], [ "# The Expected output: \"Reached 99.9% accuracy so cancelling training!\"\"\ntrain_happy_sad_model()", "Found 80 images belonging to 2 classes.\nEpoch 1/15\n8/8 [==============================] - 1s 149ms/step - loss: 1.1935 - acc: 0.5250\nEpoch 2/15\n8/8 [==============================] - 0s 49ms/step - loss: 0.2484 - acc: 0.9500\nEpoch 3/15\n8/8 [==============================] - 0s 39ms/step - loss: 0.4208 - acc: 0.8500\nEpoch 4/15\n8/8 [==============================] - 0s 47ms/step - loss: 0.1730 - acc: 0.9000\nEpoch 5/15\n8/8 [==============================] - 0s 39ms/step - loss: 
0.0960 - acc: 0.9625\nEpoch 6/15\n8/8 [==============================] - 0s 49ms/step - loss: 0.0684 - acc: 0.9750\nEpoch 7/15\n8/8 [==============================] - 0s 39ms/step - loss: 0.1233 - acc: 0.9375\nEpoch 8/15\n5/8 [=================>............] - ETA: 0s - loss: 0.0224 - acc: 1.0000\nReached 99.9% accuracy, stopping training\n8/8 [==============================] - 0s 48ms/step - loss: 0.0247 - acc: 1.0000\n" ], [ "# Now click the 'Submit Assignment' button above.\n# Once that is complete, please run the following two cells to save your work and close the notebook", "_____no_output_____" ], [ "%%javascript\n<!-- Save the notebook -->\nIPython.notebook.save_checkpoint();", "_____no_output_____" ], [ "%%javascript\nIPython.notebook.session.delete();\nwindow.onbeforeunload = null\nsetTimeout(function() { window.close(); }, 1000);", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
4a7a3d63170d810a362513381e698956713481d5
3,533
ipynb
Jupyter Notebook
_build/jupyter_execute/ipynb/Q42.ipynb
gcpeixoto/ICD
bae7d02cd467240649c89b0ba4440966fba18cc7
[ "CC0-1.0" ]
2
2021-09-09T01:56:40.000Z
2021-11-10T01:56:56.000Z
_build/jupyter_execute/ipynb/Q42.ipynb
gcpeixoto/ICD
bae7d02cd467240649c89b0ba4440966fba18cc7
[ "CC0-1.0" ]
null
null
null
_build/jupyter_execute/ipynb/Q42.ipynb
gcpeixoto/ICD
bae7d02cd467240649c89b0ba4440966fba18cc7
[ "CC0-1.0" ]
1
2021-11-23T14:24:03.000Z
2021-11-23T14:24:03.000Z
33.971154
569
0.617605
[ [ [ "## Questionário 42 (Q42)", "_____no_output_____" ], [ "Orientações: \n\n- Registre suas respostas no questionário de mesmo nome no SIGAA.\n- O tempo de registro das respostas no questionário será de 10 minutos. Portanto, resolva primeiro as questões e depois registre-as.\n- Haverá apenas 1 (uma) tentativa de resposta.\n- Submeta seu arquivo-fonte (utilizado para resolver as questões) em formato _.ipynb_ pelo SIGAA anexando-o à Tarefa denominada \"Envio de arquivo\" correspondente ao questionário.\n\n*Nota:* o arquivo-fonte será utilizado apenas como prova de execução da tarefa. Nenhuma avaliação será feita quanto ao estilo de programação.\n\n<hr>", "_____no_output_____" ], [ "**Questão 1.** No _dataset_ [enem2019.xlsx](https://github.com/gcpeixoto/ICD/blob/main/database/enem2019.xlsx), estão disponíveis as notas médias por estado obtidas nas provas do ENEM 2019. Supondo que _x_ é a diferença entre a amplitude da quantidade de inscritos na região Sudeste e a amplitude da quantidade de inscritos na região Norte, e que _y_ é o desvio médio para a série da quantidade total de inscritos de ensino médio público apenas para os estados do sul, assinale a alternativa que corretamente expressa os valores de _x_ e _y_, nesta sequência.\n\n**Obs.:** considere apenas a parte inteira do desvio médio.\n\nA. 149465 e 5690\n\nB. 169265 e 6593\n\nC. 149465 e 0\n\nD. 173921 e 2", "_____no_output_____" ], [ "**Questão 2.** Calcule o percentual _p_ de inscritos para o ENEM 2019 provenientes do ensino privado de todos os Estados em relação ao total de inscritos no exame, bem como o valor do quociente _v/V_, onde _v_ é a variância para a série do total de inscritos provenientes do ensino público e _V_ a variância para a série do total de inscritos provenientes do ensino privado. Assinale a alternativa correta para _p_ e _v/V_.\n\nA. 11.4% e 34.48\n\nB. 15% e 33.45\n\nC. 12.5% e 36.78\n\nD. 13.54% e 34.6", "_____no_output_____" ], [ "**Questão 3.** Defina a nota média $N(x)$ de cada região brasileira $x$ como a média das notas $N_i$ de cada uma das $Q$ grandes áreas de conhecimento que constam da prova do ENEM 2019, isto é,\n\n$$N(x) = \\frac{ \\sum_{i=1}^Q N_i(x)}{Q},$$\n\ne assinale a alternativa cujas regiões detém o primeiro e o segundo maiores valores de desvio padrão.\n\nA. Nordeste e Sudeste\n\nB. Sudeste e Nordeste\n\nC. Norte e Sul\n\nD. Sul e Centro-Oeste", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
4a7a4a62f99b52365d6c3bdbf0b55287861983ce
383,846
ipynb
Jupyter Notebook
src/support/notebooks/img_pre_process.ipynb
nipdep/STGAN
c72ba6cb9d23d33accc0cfa1958a2005db3ed490
[ "MIT" ]
null
null
null
src/support/notebooks/img_pre_process.ipynb
nipdep/STGAN
c72ba6cb9d23d33accc0cfa1958a2005db3ed490
[ "MIT" ]
null
null
null
src/support/notebooks/img_pre_process.ipynb
nipdep/STGAN
c72ba6cb9d23d33accc0cfa1958a2005db3ed490
[ "MIT" ]
null
null
null
694.115732
164,916
0.896284
[ [ [ "## Single image processing [resize, crope]", "_____no_output_____" ] ], [ [ "import numpy as np \n#from PIL import Image\nimport os, glob\nimport cv2", "_____no_output_____" ], [ "pic = cv2.imread('../../../data/data/1_d.jpg')\n#img = cv2.cvtColor(pic, cv2.COLOR_GRAY2RGB)\n# cv2.imshow('image', pic)\n# cv2.waitKey(0)", "_____no_output_____" ], [ "iw, ih = pic.shape[0:2]", "_____no_output_____" ], [ "w = h = 256", "_____no_output_____" ], [ "ul_img = pic[:h, :w, :]\nur_img = pic[iw-w:,ih-h:, :]\ncv2.imshow(\"upper-left\", ul_img)\ncv2.imshow(\"upper-right\", ur_img)\ncv2.imshow('image', pic)\ncv2.waitKey(0)", "_____no_output_____" ], [ "im256 = cv2.resize(pic, (256, 256), interpolation=cv2.INTER_LANCZOS4)\nim128 = cv2.resize(pic, (128, 128), interpolation=cv2.INTER_LANCZOS4)", "_____no_output_____" ], [ "cv2.imwrite('../../../data/data/1_128.jpg', im128)\ncv2.imwrite('../../../data/data/1_256.jpg', im256)\ncv2.imshow(\"128\", im128)\ncv2.imshow(\"256\", im256)\ncv2.imshow('image', pic)\ncv2.waitKey(0)", "_____no_output_____" ], [ "img_shape = (256, 256)\nim256_IN = cv2.resize(pic, img_shape, interpolation=cv2.INTER_NEAREST)\nim256_IL = cv2.resize(pic, img_shape, interpolation=cv2.INTER_LINEAR)\nim256_IA = cv2.resize(pic, img_shape, interpolation=cv2.INTER_AREA)\nim256_IC = cv2.resize(pic, img_shape, interpolation=cv2.INTER_CUBIC)\nim256_IL = cv2.resize(pic, img_shape, interpolation=cv2.INTER_LANCZOS4)\nimg_lis = [im256_IL, im256_IC, im256_IA, im256_IL, im256_IN]\nn = 0\nfor i in img_lis:\n n+=1\n cv2.imshow(f\"{n}\",i)\ncv2.imshow('image', pic)\ncv2.waitKey(0)", "_____no_output_____" ] ], [ [ "## sample image style transform by standard model", "_____no_output_____" ] ], [ [ "import tensorflow as tf \nimport tensorflow_hub as hub", "_____no_output_____" ], [ "# hub_module = hub.load('https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2')\nmodel = tf.keras.models.load_model('../../../data/models/magenta/')", "_____no_output_____" ], [ "model.summary()", "_____no_output_____" ], [ "img_shape = (256, 256)\nstyle_img = cv2.imread('../../../data/data/3.jpg')\ncnt_img = cv2.imread('../../../data/data/1.jpg')\nst256_img = cv2.resize(style_img, img_shape, interpolation=cv2.INTER_LANCZOS4).astype(np.float32)[np.newaxis, ...]/255.\ncnt256_img = cv2.resize(cnt_img, img_shape, interpolation=cv2.INTER_LANCZOS4).astype(np.float32)[np.newaxis, ...]/255.\n", "_____no_output_____" ], [ "type(st256_img)", "_____no_output_____" ], [ "print(st256_img.shape, cnt256_img.shape)", "(1, 256, 256, 3) (1, 256, 256, 3)\n" ], [ "outputs = hub_module(tf.constant(cnt256_img), tf.constant(st256_img))", "_____no_output_____" ], [ "outputs", "_____no_output_____" ], [ "out_img = np.squeeze(np.asarray(outputs))", "_____no_output_____" ], [ "from matplotlib import pyplot as plt ", "_____no_output_____" ], [ "plt.imshow(out_img)\nplt.axis(\"off\")", "_____no_output_____" ] ], [ [ "## MSO dataset EDA", "_____no_output_____" ] ], [ [ "from os import listdir\nfrom numpy import asarray\nfrom numpy import savez_compressed\nfrom PIL import Image\nfrom matplotlib import pyplot", "_____no_output_____" ], [ " \ndef load_image(filename):\n\timage = Image.open(filename)\n\timage = image.convert('RGB')\n\tpixels = asarray(image)\n\treturn pixels\n\ndef load_imgs(dir):\n shapes = []\n for filename in listdir(dir):\n pixels = load_image(dir+filename)\n if pixels.shape not in shapes:\n shapes.append(pixels.shape)\n return shapes", "_____no_output_____" ], [ "shapes = load_imgs('../../../data/data/BAAT_dataset/')", 
"_____no_output_____" ], [ "print(shapes)", "96, 3), (614, 560, 3), (614, 359, 3), (614, 294, 3), (614, 517, 3), (614, 502, 3), (614, 574, 3), (614, 351, 3), (354, 614, 3), (614, 491, 3), (599, 614, 3), (614, 448, 3), (614, 397, 3), (473, 614, 3), (614, 481, 3), (614, 210, 3), (614, 314, 3), (614, 612, 3), (614, 499, 3), (392, 614, 3), (614, 329, 3), (530, 614, 3), (403, 361, 3), (258, 312, 3), (329, 270, 3), (305, 240, 3), (270, 208, 3), (270, 225, 3), (268, 312, 3), (270, 206, 3), (234, 182, 3), (234, 184, 3), (326, 466, 3), (234, 196, 3), (140, 180, 3), (349, 448, 3), (850, 423, 3), (479, 362, 3), (456, 340, 3), (424, 320, 3), (440, 309, 3), (458, 294, 3), (443, 346, 3), (440, 346, 3), (440, 328, 3), (339, 451, 3), (270, 184, 3), (452, 337, 3), (460, 347, 3), (440, 364, 3), (440, 349, 3), (391, 368, 3), (462, 367, 3), (381, 422, 3), (400, 350, 3), (296, 422, 3), (354, 435, 3), (270, 179, 3), (371, 419, 3), (368, 407, 3), (380, 395, 3), (429, 351, 3), (432, 365, 3), (481, 321, 3), (409, 369, 3), (359, 420, 3), (358, 434, 3), (436, 341, 3), (270, 209, 3), (347, 416, 3), (419, 382, 3), (333, 464, 3), (310, 476, 3), (350, 434, 3), (349, 449, 3), (356, 434, 3), (265, 406, 3), (439, 341, 3), (270, 181, 3), (447, 332, 3), (433, 352, 3), (348, 433, 3), (341, 425, 3), (339, 426, 3), (357, 448, 3), (449, 350, 3), (311, 463, 3), (312, 425, 3), (328, 464, 3), (447, 331, 3), (469, 359, 3), (345, 422, 3), (428, 349, 3), (349, 421, 3), (328, 418, 3), (347, 418, 3), (445, 337, 3), (337, 448, 3), (455, 336, 3), (335, 448, 3), (466, 305, 3), (418, 352, 3), (393, 392, 3), (445, 336, 3), (279, 362, 3), (360, 256, 3), (337, 434, 3), (360, 523, 3), (205, 312, 3), (360, 254, 3), (288, 226, 3), (245, 180, 3), (344, 253, 3), (266, 344, 3), (348, 281, 3), (178, 344, 3), (329, 434, 3), (360, 226, 3), (360, 260, 3), (356, 373, 3), (360, 263, 3), (364, 600, 3), (375, 353, 3), (360, 325, 3), (360, 304, 3), (600, 450, 3), (295, 476, 3), (600, 494, 3), (600, 410, 3), (579, 600, 3), (379, 368, 3), (360, 350, 3), (600, 535, 3), (597, 600, 3), (600, 463, 3), (314, 480, 3), (334, 363, 3), (598, 600, 3), (360, 366, 3), (360, 435, 3), (554, 447, 3), (360, 337, 3), (464, 581, 3), (525, 445, 3), (360, 242, 3), (360, 283, 3), (360, 449, 3), (359, 305, 3), (573, 600, 3), (559, 436, 3), (599, 400, 3), (360, 424, 3), (600, 346, 3), (360, 292, 3), (360, 277, 3), (535, 600, 3), (600, 446, 3), (360, 348, 3), (540, 453, 3), (360, 264, 3), (467, 600, 3), (600, 585, 3), (471, 600, 3), (600, 314, 3), (360, 328, 3), (530, 600, 3), (360, 269, 3), (360, 335, 3), (512, 600, 3), (600, 301, 3), (360, 301, 3), (358, 309, 3), (358, 374, 3), (360, 467, 3), (360, 317, 3), (599, 600, 3), (360, 266, 3), (600, 578, 3), (466, 600, 3), (360, 268, 3), (242, 337, 3), (600, 322, 3), (600, 483, 3), (360, 265, 3), (360, 303, 3), (600, 595, 3), (358, 224, 3), (600, 418, 3), (360, 244, 3), (360, 393, 3), (360, 332, 3), (360, 316, 3), (427, 600, 3), (496, 334, 3), (533, 443, 3), (600, 315, 3), (600, 478, 3), (600, 503, 3), (359, 244, 3), (469, 367, 3), (518, 600, 3), (360, 262, 3), (360, 374, 3), (360, 273, 3), (600, 537, 3), (600, 343, 3), (600, 442, 3), (360, 229, 3), (600, 536, 3), (600, 550, 3), (526, 343, 3), (360, 288, 3), (600, 382, 3), (600, 593, 3), (360, 312, 3), (463, 600, 3), (600, 541, 3), (600, 395, 3), (360, 418, 3), (360, 368, 3), (600, 475, 3), (600, 371, 3), (360, 310, 3), (850, 614, 3), (487, 462, 3), (577, 451, 3), (609, 614, 3), (514, 360, 3), (574, 481, 3), (319, 353, 3), (697, 481, 3), (903, 614, 3), (630, 450, 3), (474, 614, 3), (668, 
614, 3), (270, 226, 3), (450, 577, 3), (357, 701, 3), (767, 614, 3), (776, 614, 3), (739, 614, 3), (428, 298, 3), (351, 286, 3), (947, 614, 3), (916, 614, 3), (930, 723, 3), (1053, 672, 3), (650, 422, 3), (366, 614, 3), (381, 535, 3), (583, 614, 3), (722, 614, 3), (638, 614, 3), (300, 256, 3), (644, 614, 3), (773, 614, 3), (1435, 1097, 3), (448, 298, 3), (1115, 858, 3), (522, 614, 3), (886, 581, 3), (710, 452, 3), (720, 462, 3), (398, 690, 3), (622, 482, 3), (517, 350, 3), (360, 291, 3), (689, 485, 3), (583, 388, 3), (239, 325, 3), (586, 481, 3), (804, 614, 3), (778, 614, 3), (824, 614, 3), (584, 523, 3), (510, 424, 3), (922, 614, 3), (851, 614, 3), (616, 485, 3), (541, 423, 3), (563, 422, 3), (602, 444, 3), (480, 398, 3), (299, 614, 3), (1193, 614, 3), (273, 480, 3), (717, 665, 3), (589, 377, 3), (480, 599, 3), (845, 614, 3), (596, 404, 3), (590, 510, 3), (827, 589, 3), (497, 341, 3), (455, 376, 3), (1206, 614, 3), (560, 481, 3), (290, 614, 3), (492, 420, 3), (362, 673, 3), (675, 482, 3), (1002, 661, 3), (617, 450, 3), (694, 481, 3), (539, 386, 3), (462, 636, 3), (623, 483, 3), (232, 317, 3), (1186, 900, 3), (513, 364, 3), (812, 614, 3), (584, 423, 3), (1604, 614, 3), (462, 804, 3), (985, 639, 3), (300, 201, 3), (831, 614, 3), (607, 496, 3), (577, 481, 3), (603, 483, 3), (558, 387, 3), (1169, 715, 3), (490, 408, 3), (462, 692, 3), (425, 618, 3), (619, 482, 3), (791, 614, 3), (362, 293, 3), (981, 789, 3), (1033, 709, 3), (957, 816, 3), (981, 808, 3), (487, 616, 3), (881, 614, 3), (962, 672, 3), (587, 614, 3), (390, 511, 3), (278, 614, 3), (697, 614, 3), (929, 614, 3), (466, 614, 3), (482, 381, 3), (838, 460, 3), (562, 538, 3), (572, 372, 3), (363, 614, 3), (616, 436, 3), (518, 450, 3), (561, 390, 3), (720, 493, 3), (619, 450, 3), (883, 614, 3), (590, 421, 3), (429, 361, 3), (510, 314, 3), (461, 319, 3), (461, 358, 3), (461, 354, 3), (461, 349, 3), (461, 357, 3), (461, 515, 3), (461, 341, 3), (461, 312, 3), (461, 323, 3), (461, 363, 3), (461, 413, 3), (461, 436, 3), (461, 321, 3), (461, 365, 3), (461, 403, 3), (461, 541, 3), (461, 288, 3), (461, 576, 3), (461, 384, 3), (461, 442, 3), (461, 387, 3), (461, 557, 3), (461, 595, 3), (461, 600, 3), (461, 230, 3), (461, 390, 3), (461, 536, 3), (461, 398, 3), (461, 322, 3), (461, 538, 3), (461, 310, 3), (461, 370, 3), (461, 285, 3), (365, 614, 3), (461, 313, 3), (461, 277, 3), (461, 269, 3), (461, 379, 3), (461, 518, 3), (461, 338, 3), (461, 347, 3), (461, 371, 3), (411, 614, 3), (461, 421, 3), (461, 375, 3), (412, 614, 3), (461, 392, 3), (461, 351, 3), (346, 614, 3), (298, 614, 3), (461, 533, 3), (461, 547, 3), (461, 432, 3), (461, 433, 3), (461, 361, 3), (461, 610, 3), (461, 266, 3), (461, 591, 3), (461, 521, 3), (461, 495, 3), (461, 324, 3), (461, 567, 3), (461, 342, 3), (461, 373, 3), (438, 614, 3), (461, 402, 3), (461, 337, 3), (461, 293, 3), (461, 299, 3), (461, 291, 3), (461, 283, 3), (916, 1148, 3), (611, 931, 3), (1367, 952, 3), (1000, 849, 3), (1002, 844, 3), (1000, 847, 3), (992, 826, 3), (1004, 850, 3), (1002, 829, 3), (629, 942, 3), (983, 891, 3), (627, 941, 3), (740, 941, 3), (632, 936, 3), (619, 919, 3), (627, 939, 3), (743, 934, 3), (748, 941, 3), (748, 938, 3), (1378, 990, 3), (750, 941, 3), (1362, 993, 3), (1054, 944, 3), (1372, 998, 3), (1351, 965, 3), (1344, 975, 3), (673, 957, 3), (673, 939, 3), (598, 508, 3), (471, 614, 3), (614, 565, 3), (520, 514, 3), (624, 518, 3), (614, 523, 3), (600, 518, 3), (418, 614, 3), (841, 1146, 3), (404, 600, 3), (600, 300, 3), (403, 600, 3), (529, 600, 3), (512, 257, 3), (410, 266, 3), (600, 394, 
3), (600, 408, 3), (448, 600, 3), (370, 600, 3), (600, 454, 3), (600, 445, 3), (600, 406, 3), (600, 367, 3), (600, 407, 3), (600, 441, 3), (458, 503, 3), (464, 600, 3), (352, 490, 3), (303, 415, 3), (373, 600, 3), (599, 493, 3), (600, 471, 3), (357, 600, 3), (600, 262, 3), (600, 402, 3), (600, 479, 3), (600, 352, 3), (600, 388, 3), (269, 600, 3), (284, 600, 3), (254, 600, 3), (213, 600, 3), (426, 600, 3), (334, 600, 3), (538, 600, 3), (600, 404, 3), (593, 600, 3), (479, 565, 3), (508, 600, 3), (398, 600, 3), (283, 600, 3), (600, 206, 3), (506, 600, 3), (335, 600, 3), (600, 349, 3), (440, 600, 3), (386, 600, 3), (504, 426, 3), (260, 600, 3), (497, 290, 3), (512, 230, 3), (440, 213, 3), (600, 331, 3), (497, 206, 3), (477, 600, 3), (418, 600, 3), (189, 600, 3), (446, 264, 3), (600, 134, 3), (496, 600, 3), (216, 600, 3), (446, 269, 3), (451, 600, 3), (494, 221, 3), (600, 522, 3), (342, 600, 3), (439, 600, 3), (600, 510, 3), (365, 600, 3), (600, 225, 3), (600, 413, 3), (523, 600, 3), (415, 600, 3), (417, 590, 3), (600, 440, 3), (472, 600, 3), (600, 476, 3), (540, 376, 3), (600, 493, 3), (600, 362, 3), (449, 600, 3), (600, 455, 3), (600, 359, 3), (600, 289, 3), (600, 411, 3), (456, 600, 3), (600, 451, 3), (600, 370, 3), (600, 354, 3), (433, 600, 3), (600, 461, 3), (600, 350, 3), (600, 559, 3), (505, 600, 3), (600, 317, 3), (237, 600, 3), (234, 600, 3), (228, 600, 3), (499, 600, 3), (600, 374, 3), (425, 600, 3), (515, 600, 3), (600, 378, 3), (469, 600, 3), (454, 600, 3), (397, 600, 3), (356, 600, 3), (385, 600, 3), (502, 600, 3), (552, 600, 3), (420, 600, 3), (482, 600, 3), (382, 600, 3), (473, 600, 3), (375, 600, 3), (600, 178, 3), (600, 401, 3), (453, 600, 3), (478, 600, 3), (337, 600, 3), (600, 358, 3), (600, 308, 3), (600, 284, 3), (600, 212, 3), (600, 373, 3), (600, 355, 3), (600, 574, 3), (519, 600, 3), (600, 247, 3), (345, 226, 3), (254, 331, 3), (768, 598, 3), (278, 359, 3), (236, 190, 3), (540, 441, 3), (215, 284, 3), (236, 182, 3), (438, 565, 3), (521, 399, 3), (566, 337, 3), (479, 600, 3), (446, 353, 3), (228, 167, 3), (236, 165, 3), (254, 204, 3), (240, 189, 3), (494, 329, 3), (379, 482, 3), (278, 392, 3), (476, 351, 3), (486, 338, 3), (479, 354, 3), (590, 347, 3), (301, 392, 3), (668, 533, 3), (674, 533, 3), (653, 454, 3), (709, 415, 3), (244, 203, 3), (464, 354, 3), (577, 768, 3), (274, 668, 3), (157, 128, 3), (228, 146, 3), (436, 306, 3), (335, 382, 3), (572, 720, 3), (600, 428, 3), (454, 331, 3), (242, 300, 3), (344, 472, 3), (554, 442, 3), (653, 529, 3), (490, 317, 3), (272, 668, 3), (136, 193, 3), (600, 390, 3), (653, 448, 3), (600, 437, 3), (623, 496, 3), (362, 480, 3), (600, 591, 3), (504, 600, 3), (600, 417, 3), (361, 238, 3), (449, 349, 3), (470, 541, 3), (310, 463, 3), (437, 392, 3), (473, 351, 3), (653, 617, 3), (614, 509, 3), (419, 331, 3), (208, 362, 3), (464, 349, 3), (348, 434, 3), (623, 437, 3), (600, 398, 3), (600, 502, 3), (600, 477, 3), (269, 369, 3), (600, 583, 3), (286, 389, 3), (472, 351, 3), (485, 653, 3), (466, 394, 3), (436, 565, 3), (295, 360, 3), (206, 263, 3), (784, 474, 3), (361, 580, 3), (191, 240, 3), (299, 218, 3), (254, 322, 3), (653, 485, 3), (553, 394, 3), (443, 267, 3), (165, 226, 3), (233, 362, 3), (519, 653, 3), (511, 420, 3), (593, 446, 3), (600, 380, 3), (592, 768, 3), (496, 653, 3), (401, 338, 3), (212, 277, 3), (653, 499, 3), (451, 653, 3), (600, 458, 3), (653, 301, 3), (614, 389, 3), (496, 236, 3), (612, 478, 3), (600, 464, 3), (600, 498, 3), (480, 351, 3), (719, 477, 3), (600, 456, 3), (539, 443, 3), (489, 394, 3), (228, 186, 3), (600, 472, 
3), (439, 653, 3), (600, 452, 3), (575, 396, 3), (653, 553, 3), (253, 321, 3), (215, 187, 3), (547, 429, 3), (600, 376, 3), (476, 367, 3), (677, 456, 3), (290, 389, 3), (602, 494, 3), (653, 430, 3), (532, 653, 3), (614, 513, 3), (600, 542, 3), (523, 235, 3), (326, 232, 3), (211, 257, 3), (646, 465, 3), (486, 356, 3), (345, 244, 3), (597, 768, 3), (215, 360, 3), (600, 460, 3), (487, 278, 3), (768, 620, 3), (653, 478, 3), (495, 600, 3), (612, 464, 3), (600, 496, 3), (529, 571, 3), (542, 338, 3), (326, 263, 3), (501, 600, 3), (210, 282, 3), (600, 473, 3), (653, 501, 3), (423, 334, 3), (271, 668, 3), (600, 420, 3), (584, 768, 3), (768, 566, 3), (653, 471, 3), (600, 422, 3), (476, 600, 3), (515, 653, 3), (600, 514, 3), (345, 256, 3), (600, 465, 3), (326, 235, 3), (202, 240, 3), (228, 227, 3), (653, 437, 3), (616, 768, 3), (530, 508, 3), (614, 531, 3), (600, 469, 3), (653, 515, 3), (768, 563, 3), (593, 454, 3), (401, 349, 3), (344, 434, 3), (768, 522, 3), (236, 329, 3), (600, 421, 3), (476, 354, 3), (450, 341, 3), (403, 490, 3), (653, 411, 3), (343, 452, 3), (653, 509, 3), (286, 220, 3), (332, 225, 3), (529, 337, 3), (563, 465, 3), (346, 431, 3), (600, 368, 3), (569, 396, 3), (345, 275, 3), (625, 441, 3), (252, 204, 3), (345, 282, 3), (310, 459, 3), (600, 443, 3), (768, 475, 3), (383, 268, 3), (483, 600, 3), (356, 530, 3), (556, 391, 3), (480, 356, 3), (452, 344, 3), (284, 653, 3), (600, 403, 3), (653, 542, 3), (653, 472, 3), (478, 308, 3), (602, 438, 3), (614, 514, 3), (768, 510, 3), (248, 369, 3), (600, 565, 3), (472, 612, 3), (653, 505, 3), (653, 427, 3), (361, 473, 3), (736, 768, 3), (345, 277, 3), (234, 361, 3), (180, 205, 3), (600, 439, 3), (384, 224, 3), (271, 667, 3), (370, 471, 3), (290, 229, 3), (600, 488, 3), (228, 224, 3), (653, 368, 3), (344, 417, 3), (351, 446, 3), (625, 505, 3), (536, 394, 3), (738, 410, 3), (235, 193, 3), (653, 456, 3), (536, 446, 3), (249, 362, 3), (394, 571, 3), (326, 242, 3), (489, 338, 3), (455, 600, 3), (581, 409, 3), (623, 499, 3), (614, 395, 3), (411, 653, 3), (600, 814, 3), (253, 173, 3), (473, 640, 3), (196, 240, 3), (609, 396, 3), (345, 280, 3), (345, 199, 3), (228, 313, 3), (592, 338, 3), (391, 600, 3), (389, 420, 3), (614, 470, 3), (500, 600, 3), (548, 653, 3), (473, 391, 3), (694, 431, 3), (590, 472, 3), (653, 481, 3), (235, 189, 3), (594, 477, 3), (210, 266, 3), (600, 481, 3), (579, 768, 3), (628, 382, 3), (541, 396, 3), (768, 577, 3), (595, 484, 3), (600, 253, 3), (653, 428, 3), (384, 275, 3), (415, 653, 3), (653, 510, 3), (462, 392, 3), (600, 427, 3), (441, 353, 3), (431, 653, 3), (234, 156, 3), (240, 180, 3), (399, 338, 3), (269, 598, 3), (213, 164, 3), (239, 205, 3), (441, 349, 3), (240, 191, 3), (223, 293, 3), (193, 149, 3), (380, 553, 3), (376, 658, 3), (264, 396, 3), (324, 210, 3), (653, 544, 3), (653, 467, 3), (768, 501, 3), (345, 252, 3), (346, 475, 3), (487, 653, 3), (653, 261, 3), (253, 185, 3), (552, 653, 3), (356, 580, 3), (345, 276, 3), (653, 548, 3), (600, 485, 3), (395, 298, 3), (334, 405, 3), (768, 463, 3), (499, 394, 3), (419, 346, 3), (653, 476, 3), (542, 392, 3), (386, 274, 3), (614, 487, 3), (383, 269, 3), (594, 454, 3), (235, 179, 3), (290, 360, 3), (386, 480, 3), (613, 480, 3), (825, 480, 3), (640, 480, 3), (615, 480, 3), (370, 480, 3), (491, 382, 3), (322, 384, 3), (589, 480, 3), (333, 430, 3), (420, 517, 3), (606, 480, 3), (634, 480, 3), (304, 420, 3), (498, 420, 3), (335, 416, 3), (319, 420, 3), (347, 420, 3), (329, 420, 3), (483, 360, 3), (302, 420, 3), (384, 540, 3), (259, 420, 3), (310, 420, 3), (380, 368, 3), (587, 420, 
3), (423, 540, 3), (559, 480, 3), (628, 480, 3), (464, 680, 3), (496, 626, 3), (499, 676, 3), (614, 423, 3), (677, 574, 3), (529, 674, 3), (427, 625, 3), (1441, 1906, 3), (461, 666, 3), (658, 521, 3), (467, 583, 3), (482, 614, 3), (701, 886, 3), (442, 614, 3), (504, 614, 3), (653, 487, 3), (703, 1200, 3), (483, 614, 3), (518, 1200, 3), (614, 426, 3), (360, 614, 3), (449, 614, 3), (568, 679, 3), (404, 614, 3), (922, 725, 3), (664, 524, 3), (720, 577, 3), (719, 594, 3), (912, 763, 3), (747, 922, 3), (476, 654, 3), (340, 614, 3), (658, 886, 3), (614, 561, 3), (886, 688, 3), (328, 593, 3), (874, 687, 3), (922, 647, 3), (562, 681, 3), (359, 614, 3), (451, 614, 3), (731, 922, 3), (644, 886, 3), (553, 485, 3), (514, 614, 3), (403, 614, 3), (578, 442, 3), (534, 677, 3), (533, 614, 3), (614, 353, 3), (500, 614, 3), (531, 614, 3), (464, 643, 3), (675, 982, 3), (535, 671, 3), (1246, 1920, 3), (479, 614, 3), (310, 614, 3), (361, 614, 3), (518, 614, 3), (1033, 1399, 3), (536, 679, 3), (659, 543, 3), (378, 614, 3), (497, 619, 3), (489, 614, 3), (388, 614, 3), (614, 332, 3), (952, 1194, 3), (568, 614, 3), (355, 614, 3), (619, 451, 3), (398, 614, 3), (652, 568, 3), (863, 1076, 3), (672, 534, 3), (567, 676, 3), (397, 614, 3), (644, 485, 3), (444, 1184, 3), (674, 555, 3), (457, 614, 3), (676, 454, 3), (477, 614, 3), (675, 527, 3), (605, 930, 3), (886, 679, 3), (461, 552, 3), (370, 614, 3), (391, 643, 3), (638, 410, 3), (483, 682, 3), (1080, 773, 3), (587, 729, 3), (330, 677, 3), (467, 571, 3), (538, 768, 3), (389, 635, 3), (493, 678, 3), (827, 655, 3), (673, 463, 3), (614, 390, 3), (665, 506, 3), (523, 676, 3), (524, 720, 3), (668, 565, 3), (558, 682, 3), (537, 614, 3), (531, 670, 3), (593, 664, 3), (594, 751, 3), (497, 614, 3), (568, 461, 3), (562, 768, 3), (674, 460, 3), (788, 960, 3), (427, 678, 3), (542, 678, 3), (884, 696, 3), (283, 922, 3), (886, 707, 3), (646, 473, 3), (649, 886, 3), (674, 523, 3), (548, 677, 3), (446, 571, 3), (614, 532, 3), (520, 677, 3), (999, 1200, 3), (642, 467, 3), (481, 614, 3), (614, 539, 3), (745, 540, 3), (614, 528, 3), (377, 638, 3), (629, 494, 3), (611, 502, 3), (614, 352, 3), (649, 512, 3), (396, 614, 3), (886, 698, 3), (659, 552, 3), (707, 892, 3), (909, 1117, 3), (614, 454, 3), (676, 553, 3), (480, 473, 3), (614, 382, 3), (648, 900, 3), (316, 614, 3), (487, 672, 3), (562, 643, 3), (932, 1024, 3), (674, 511, 3), (1518, 1906, 3), (878, 1080, 3), (614, 360, 3), (540, 455, 3), (663, 886, 3), (682, 886, 3), (886, 658, 3), (1487, 1906, 3), (676, 565, 3), (1463, 1214, 3), (834, 719, 3), (520, 679, 3), (696, 886, 3), (305, 614, 3), (571, 680, 3), (674, 536, 3), (1473, 1906, 3), (614, 483, 3), (634, 886, 3), (872, 1097, 3), (720, 602, 3), (309, 243, 3), (757, 480, 3), (277, 480, 3), (473, 420, 3), (459, 344, 3), (340, 464, 3), (420, 315, 3), (480, 290, 3), (623, 477, 3), (174, 480, 3), (480, 392, 3), (544, 480, 3), (476, 308, 3), (420, 278, 3), (393, 380, 3), (475, 450, 3), (622, 474, 3), (420, 257, 3), (442, 365, 3), (420, 425, 3), (502, 480, 3), (286, 480, 3), (420, 269, 3), (473, 332, 3), (652, 480, 3), (420, 319, 3), (548, 480, 3), (682, 480, 3), (420, 448, 3), (448, 358, 3), (450, 349, 3), (455, 347, 3), (661, 480, 3), (421, 480, 3), (515, 429, 3), (478, 450, 3), (462, 348, 3), (561, 480, 3), (419, 379, 3), (622, 480, 3), (480, 341, 3), (420, 286, 3), (426, 316, 3), (480, 397, 3), (667, 474, 3), (706, 474, 3), (382, 278, 3), (467, 401, 3), (729, 480, 3), (508, 480, 3), (306, 480, 3), (498, 480, 3), (385, 480, 3), (307, 480, 3), (346, 480, 3), (425, 420, 3), (492, 474, 3), 
(287, 399, 3), (339, 480, 3), (357, 450, 3), (580, 450, 3), (353, 425, 3), (634, 477, 3), (420, 357, 3), (366, 480, 3), (480, 446, 3), (420, 340, 3), (656, 480, 3), (482, 480, 3), (539, 480, 3), (555, 450, 3), (482, 474, 3), (420, 303, 3), (480, 365, 3), (598, 450, 3), (473, 343, 3), (646, 480, 3), (735, 480, 3), (444, 341, 3), (432, 343, 3), (698, 480, 3), (328, 420, 3), (278, 480, 3), (474, 342, 3), (602, 480, 3), (503, 311, 3), (718, 449, 3), (432, 325, 3), (538, 480, 3), (600, 480, 3), (480, 331, 3), (418, 346, 3), (487, 404, 3), (401, 479, 3), (456, 328, 3), (592, 480, 3), (595, 450, 3), (526, 480, 3), (466, 350, 3), (572, 480, 3), (632, 420, 3), (500, 450, 3), (578, 404, 3), (651, 480, 3), (326, 480, 3), (755, 420, 3), (689, 430, 3), (412, 477, 3), (626, 474, 3), (647, 480, 3), (341, 473, 3), (494, 316, 3), (778, 480, 3), (601, 480, 3), (157, 317, 3), (603, 450, 3), (365, 479, 3), (417, 480, 3), (233, 480, 3), (494, 327, 3), (506, 480, 3), (413, 394, 3), (753, 480, 3), (243, 279, 3), (563, 450, 3), (680, 480, 3), (353, 450, 3), (420, 292, 3), (388, 407, 3), (465, 351, 3), (437, 365, 3), (432, 371, 3), (240, 318, 3), (432, 302, 3), (539, 450, 3), (651, 450, 3), (416, 365, 3), (480, 323, 3), (430, 480, 3), (686, 450, 3), (570, 404, 3), (1002, 480, 3), (461, 583, 3), (464, 398, 3), (593, 540, 3), (697, 540, 3), (357, 614, 3), (787, 510, 3), (476, 566, 3), (633, 486, 3), (470, 358, 3), (360, 230, 3), (470, 575, 3), (496, 404, 3), (662, 458, 3), (504, 336, 3), (943, 360, 3), (398, 617, 3), (658, 531, 3), (600, 364, 3), (626, 466, 3), (661, 497, 3), (574, 364, 3), (542, 664, 3), (601, 387, 3), (582, 409, 3), (541, 451, 3), (454, 660, 3), (637, 540, 3), (360, 297, 3), (576, 409, 3), (308, 480, 3), (540, 470, 3), (600, 486, 3), (432, 682, 3), (360, 251, 3), (624, 436, 3), (607, 960, 3), (480, 375, 3), (601, 492, 3), (419, 628, 3), (603, 461, 3), (613, 422, 3), (585, 437, 3), (946, 363, 3), (703, 557, 3), (470, 343, 3), (491, 433, 3), (468, 490, 3), (539, 312, 3), (192, 319, 3), (661, 448, 3), (480, 718, 3), (372, 263, 3), (464, 362, 3), (468, 466, 3), (812, 526, 3), (570, 816, 3), (450, 624, 3), (334, 614, 3), (461, 672, 3), (412, 600, 3), (690, 325, 3), (476, 389, 3), (614, 305, 3), (473, 605, 3), (410, 632, 3), (624, 493, 3), (739, 540, 3), (428, 619, 3), (469, 660, 3), (465, 318, 3), (497, 627, 3), (646, 420, 3), (468, 301, 3), (943, 540, 3), (615, 427, 3), (576, 405, 3), (189, 195, 3), (660, 460, 3), (612, 407, 3), (528, 408, 3), (459, 654, 3), (437, 600, 3), (742, 480, 3), (528, 437, 3), (582, 465, 3), (780, 527, 3), (610, 607, 3), (630, 460, 3), (642, 453, 3), (394, 480, 3), (442, 312, 3), (720, 594, 3), (463, 317, 3), (719, 438, 3), (462, 817, 3), (484, 369, 3), (704, 450, 3), (463, 340, 3), (360, 228, 3), (480, 791, 3), (609, 780, 3), (720, 536, 3), (660, 664, 3), (540, 433, 3), (644, 492, 3), (343, 624, 3), (360, 290, 3), (416, 317, 3), (464, 351, 3), (820, 570, 3), (994, 1195, 3), (624, 502, 3), (486, 768, 3), (674, 513, 3), (881, 724, 3), (768, 599, 3), (441, 768, 3), (692, 719, 3), (503, 623, 3), (553, 434, 3), (615, 479, 3), (358, 574, 3), (629, 467, 3), (457, 480, 3), (830, 1199, 3), (565, 491, 3), (736, 614, 3), (559, 413, 3), (746, 614, 3), (496, 637, 3), (633, 517, 3), (619, 500, 3), (644, 619, 3), (685, 540, 3), (419, 609, 3), (675, 551, 3), (287, 768, 3), (666, 504, 3), (522, 425, 3), (1198, 937, 3), (610, 676, 3), (612, 494, 3), (519, 768, 3), (652, 517, 3), (604, 469, 3), (1229, 605, 3), (604, 415, 3), (882, 1198, 3), (1198, 967, 3), (472, 768, 3), (675, 530, 3), (608, 
503, 3), (669, 513, 3), (613, 734, 3), (421, 614, 3), (662, 544, 3), (483, 768, 3), (602, 527, 3), (360, 619, 3), (604, 488, 3), (647, 412, 3), (614, 632, 3), (1229, 983, 3), (448, 614, 3), (960, 554, 3), (674, 469, 3), (683, 571, 3), (407, 768, 3), (883, 730, 3), (612, 489, 3), (540, 700, 3), (413, 582, 3), (643, 474, 3), (673, 575, 3), (514, 620, 3), (612, 768, 3), (1198, 971, 3), (632, 416, 3), (422, 510, 3), (340, 768, 3), (454, 605, 3), (634, 486, 3), (646, 481, 3), (611, 768, 3), (1200, 838, 3), (570, 469, 3), (516, 614, 3), (636, 435, 3), (614, 386, 3), (1181, 931, 3), (672, 522, 3), (1198, 952, 3), (443, 768, 3), (859, 1063, 3), (742, 612, 3), (806, 1197, 3), (893, 1187, 3), (554, 453, 3), (751, 605, 3), (671, 431, 3), (748, 614, 3), (606, 768, 3), (1200, 1058, 3), (614, 732, 3), (478, 768, 3), (666, 421, 3), (836, 614, 3), (677, 554, 3), (1027, 858, 3), (686, 540, 3), (609, 767, 3), (1229, 1044, 3), (519, 614, 3), (607, 533, 3), (614, 733, 3), (614, 178, 3), (490, 768, 3), (664, 510, 3), (613, 493, 3), (623, 514, 3), (606, 440, 3), (602, 503, 3), (506, 768, 3), (644, 547, 3), (1198, 847, 3), (609, 498, 3), (527, 614, 3), (1028, 608, 3), (731, 614, 3), (620, 472, 3), (646, 596, 3), (667, 513, 3), (614, 717, 3), (670, 487, 3), (632, 586, 3), (726, 900, 3), (757, 614, 3), (584, 427, 3), (625, 504, 3), (604, 460, 3), (689, 547, 3), (610, 506, 3), (612, 477, 3), (540, 660, 3), (674, 392, 3), (636, 461, 3), (394, 679, 3), (591, 614, 3), (674, 527, 3), (661, 973, 3), (594, 465, 3), (622, 503, 3), (613, 503, 3), (616, 488, 3), (611, 471, 3), (503, 768, 3), (707, 614, 3), (614, 738, 3), (670, 529, 3), (677, 530, 3), (532, 768, 3), (541, 475, 3), (615, 364, 3), (624, 376, 3), (602, 482, 3), (638, 517, 3), (584, 479, 3), (452, 376, 3), (579, 774, 3), (1229, 919, 3), (568, 768, 3), (587, 485, 3), (623, 378, 3), (577, 334, 3), (389, 630, 3), (630, 524, 3), (1229, 370, 3), (426, 614, 3), (606, 503, 3), (1229, 1528, 3), (616, 478, 3), (420, 588, 3), (623, 534, 3), (859, 1199, 3), (595, 465, 3), (330, 597, 3), (751, 908, 3), (664, 439, 3), (614, 751, 3), (674, 554, 3), (1198, 881, 3), (610, 481, 3), (614, 665, 3), (601, 493, 3), (540, 438, 3), (648, 535, 3), (1196, 977, 3), (286, 411, 3), (629, 521, 3), (865, 418, 3), (1229, 1051, 3), (594, 505, 3), (931, 720, 3), (614, 547, 3), (630, 566, 3), (343, 614, 3), (1229, 1016, 3), (611, 479, 3), (664, 425, 3), (395, 768, 3), (1127, 927, 3), (620, 538, 3), (365, 587, 3), (629, 499, 3), (704, 881, 3), (614, 524, 3), (1229, 890, 3), (630, 504, 3), (433, 576, 3), (835, 1151, 3), (610, 768, 3), (942, 775, 3), (664, 518, 3), (613, 497, 3), (648, 467, 3), (606, 502, 3), (635, 516, 3), (587, 492, 3), (589, 466, 3), (1229, 763, 3), (781, 960, 3), (802, 614, 3), (610, 364, 3), (611, 461, 3), (193, 572, 3), (512, 614, 3), (960, 775, 3), (624, 505, 3), (614, 737, 3), (626, 494, 3), (1200, 928, 3), (619, 491, 3), (556, 502, 3), (602, 533, 3), (602, 764, 3), (768, 575, 3), (1229, 964, 3), (581, 767, 3), (1219, 1536, 3), (532, 669, 3), (677, 516, 3), (479, 407, 3), (314, 720, 3), (882, 657, 3), (391, 768, 3), (615, 504, 3), (679, 533, 3), (466, 579, 3), (553, 720, 3), (422, 353, 3), (517, 720, 3), (548, 720, 3), (528, 768, 3), (557, 768, 3), (489, 414, 3), (560, 720, 3), (432, 768, 3), (614, 622, 3), (433, 768, 3), (454, 409, 3), (558, 768, 3), (434, 768, 3), (603, 768, 3), (547, 743, 3), (505, 768, 3), (589, 720, 3), (614, 428, 3), (582, 768, 3), (542, 720, 3), (548, 541, 3), (489, 720, 3), (529, 720, 3), (571, 720, 3), (614, 764, 3), (583, 768, 3), (614, 706, 3), 
(416, 618, 3), (546, 768, 3), (591, 768, 3), (464, 605, 3), (586, 720, 3), (508, 720, 3), (460, 720, 3), (481, 720, 3), (497, 720, 3), (559, 768, 3), (440, 768, 3), (506, 720, 3), (614, 647, 3), (552, 720, 3), (435, 768, 3), (518, 768, 3), (560, 768, 3), (476, 768, 3), (614, 724, 3), (421, 625, 3), (538, 720, 3), (437, 768, 3), (508, 768, 3), (547, 720, 3), (439, 768, 3), (562, 720, 3), (496, 720, 3), (438, 768, 3), (549, 720, 3), (573, 720, 3), (551, 720, 3), (547, 768, 3), (571, 768, 3), (430, 687, 3), (420, 751, 3), (556, 720, 3), (611, 720, 3), (565, 720, 3), (550, 720, 3), (557, 720, 3), (526, 720, 3), (547, 708, 3), (680, 420, 3), (792, 448, 3), (555, 451, 3), (480, 479, 3), (480, 410, 3), (360, 458, 3), (720, 453, 3), (516, 480, 3), (279, 480, 3), (478, 480, 3), (692, 454, 3), (776, 380, 3), (480, 336, 3), (713, 454, 3), (305, 480, 3), (485, 480, 3), (736, 480, 3), (587, 480, 3), (610, 455, 3), (547, 416, 3), (670, 435, 3), (674, 475, 3), (416, 480, 3), (487, 480, 3), (197, 240, 3), (731, 464, 3), (566, 435, 3), (474, 480, 3), (637, 343, 3), (343, 480, 3), (185, 240, 3), (480, 480, 3), (463, 480, 3), (480, 342, 3), (403, 480, 3), (499, 480, 3), (603, 443, 3), (468, 480, 3), (391, 480, 3), (589, 444, 3), (228, 240, 3), (488, 463, 3), (540, 401, 3), (510, 419, 3), (693, 425, 3), (354, 446, 3), (384, 465, 3), (342, 480, 3), (422, 480, 3), (506, 384, 3), (614, 593, 3), (614, 437, 3), (307, 614, 3), (614, 604, 3), (326, 614, 3), (520, 614, 3), (174, 614, 3), (614, 392, 3), (603, 614, 3), (344, 614, 3), (614, 587, 3), (614, 607, 3), (614, 516, 3), (614, 533, 3), (614, 354, 3), (328, 614, 3), (614, 525, 3), (528, 614, 3), (320, 614, 3), (614, 453, 3), (419, 614, 3), (647, 900, 3), (887, 727, 3), (384, 330, 3), (532, 779, 3), (1013, 734, 3), (603, 726, 3), (1024, 738, 3), (401, 614, 3), (685, 509, 3), (689, 543, 3), (685, 529, 3), (695, 532, 3), (960, 719, 3), (562, 780, 3), (1010, 742, 3), (960, 766, 3), (823, 751, 3), (569, 614, 3), (402, 574, 3), (614, 569, 3), (960, 718, 3), (886, 648, 3), (587, 775, 3), (970, 720, 3), (951, 719, 3), (742, 577, 3), (1223, 934, 3), (1168, 1522, 3), (886, 600, 3), (886, 669, 3), (876, 960, 3), (899, 719, 3), (673, 614, 3), (897, 726, 3), (701, 762, 3), (614, 563, 3), (886, 773, 3), (2296, 1786, 3), (640, 512, 3), (540, 398, 3), (990, 756, 3), (513, 720, 3), (629, 886, 3), (780, 626, 3), (581, 780, 3), (586, 780, 3), (809, 668, 3), (510, 704, 3), (450, 655, 3), (923, 761, 3), (1436, 1200, 3), (1140, 890, 3), (472, 614, 3), (540, 717, 3), (759, 720, 3), (540, 437, 3), (1568, 1214, 3), (842, 719, 3), (614, 581, 3), (500, 455, 3), (955, 720, 3), (886, 754, 3), (886, 716, 3), (614, 564, 3), (566, 719, 3), (972, 754, 3), (814, 1065, 3), (960, 689, 3), (505, 602, 3), (739, 523, 3), (1355, 946, 3), (1488, 1128, 3), (927, 714, 3), (1800, 1519, 3), (614, 507, 3), (1254, 1522, 3), (967, 751, 3), (614, 471, 3), (562, 614, 3), (960, 793, 3), (960, 799, 3), (600, 492, 3), (886, 745, 3), (960, 902, 3), (910, 691, 3), (964, 720, 3), (599, 761, 3), (614, 379, 3), (888, 720, 3), (886, 720, 3), (979, 754, 3), (779, 719, 3), (960, 721, 3), (1606, 1219, 3), (936, 720, 3), (981, 758, 3), (1026, 731, 3), (614, 558, 3), (614, 568, 3), (1632, 1301, 3), (1634, 1235, 3), (625, 480, 3), (391, 484, 3), (352, 484, 3), (360, 459, 3), (350, 484, 3), (358, 484, 3), (395, 484, 3), (335, 484, 3), (377, 484, 3), (484, 360, 3), (375, 484, 3), (484, 249, 3), (340, 248, 3), (396, 484, 3), (484, 405, 3), (367, 484, 3), (401, 484, 3), (351, 484, 3), (484, 357, 3), (461, 336, 3), (420, 277, 3), 
(420, 274, 3), (484, 309, 3), (341, 484, 3), (389, 484, 3), (382, 484, 3), (399, 484, 3), (360, 286, 3), (292, 484, 3), (484, 389, 3), (432, 315, 3), (388, 484, 3), (312, 354, 3), (484, 396, 3), (392, 484, 3), (383, 484, 3), (409, 484, 3), (379, 484, 3), (484, 330, 3), (295, 484, 3), (408, 484, 3), (329, 484, 3), (372, 484, 3), (387, 484, 3), (484, 339, 3), (484, 327, 3), (325, 484, 3), (484, 311, 3), (378, 270, 3), (484, 365, 3), (484, 394, 3), (376, 484, 3), (370, 484, 3), (420, 271, 3), (365, 484, 3), (484, 408, 3), (333, 484, 3), (413, 484, 3), (338, 484, 3), (390, 484, 3), (323, 484, 3), (360, 468, 3), (484, 329, 3), (356, 484, 3), (360, 281, 3), (461, 348, 3), (360, 276, 3), (364, 484, 3), (397, 484, 3), (484, 354, 3), (265, 267, 3), (344, 484, 3), (450, 339, 3), (354, 484, 3), (324, 484, 3), (480, 568, 3), (480, 739, 3), (480, 600, 3), (480, 667, 3), (480, 697, 3), (480, 452, 3), (480, 597, 3), (480, 590, 3), (480, 327, 3), (480, 677, 3), (480, 760, 3), (480, 521, 3), (480, 626, 3), (248, 614, 3), (480, 612, 3), (480, 588, 3), (480, 602, 3), (480, 596, 3), (480, 832, 3), (480, 617, 3), (480, 514, 3), (480, 844, 3), (480, 485, 3), (480, 369, 3), (480, 603, 3), (960, 1205, 3), (480, 701, 3), (480, 618, 3), (480, 611, 3), (480, 385, 3), (480, 316, 3), (480, 684, 3), (480, 647, 3), (480, 788, 3), (480, 712, 3), (320, 482, 3), (480, 566, 3), (480, 579, 3), (480, 436, 3), (480, 688, 3), (480, 540, 3), (480, 562, 3), (480, 333, 3), (480, 391, 3), (480, 604, 3), (480, 808, 3), (480, 558, 3), (480, 641, 3), (480, 458, 3), (480, 606, 3), (480, 662, 3), (480, 405, 3), (480, 589, 3), (480, 615, 3), (480, 592, 3), (480, 616, 3), (480, 484, 3), (480, 583, 3), (480, 530, 3), (480, 620, 3), (480, 379, 3), (480, 593, 3), (480, 455, 3), (480, 735, 3), (480, 489, 3), (480, 463, 3), (480, 737, 3), (480, 938, 3), (480, 707, 3), (480, 941, 3), (480, 577, 3), (480, 404, 3), (480, 654, 3), (480, 419, 3), (480, 661, 3), (480, 322, 3), (480, 757, 3), (480, 678, 3), (480, 487, 3), (480, 529, 3), (480, 481, 3), (480, 721, 3), (917, 614, 3), (516, 900, 3), (264, 614, 3), (360, 793, 3), (445, 1260, 3), (598, 614, 3), (796, 614, 3), (818, 614, 3), (732, 450, 3), (1104, 614, 3), (805, 390, 3), (466, 526, 3), (923, 614, 3), (990, 727, 3), (1050, 788, 3), (635, 481, 3), (1080, 756, 3), (849, 614, 3), (902, 614, 3), (710, 614, 3), (989, 614, 3), (492, 614, 3), (907, 614, 3), (835, 614, 3), (723, 446, 3), (362, 320, 3), (402, 1130, 3), (364, 319, 3), (715, 204, 3), (685, 480, 3), (1020, 707, 3), (286, 614, 3), (897, 614, 3), (886, 496, 3), (953, 614, 3), (1080, 715, 3), (1433, 946, 3), (970, 614, 3), (944, 614, 3), (795, 614, 3), (689, 607, 3), (819, 614, 3), (611, 614, 3), (886, 504, 3), (720, 738, 3), (575, 857, 3), (704, 420, 3), (516, 571, 3), (573, 1260, 3), (774, 614, 3), (714, 614, 3), (720, 720, 3), (910, 614, 3), (958, 614, 3), (639, 614, 3), (480, 319, 3), (377, 614, 3), (937, 614, 3), (394, 614, 3), (251, 614, 3), (1123, 778, 3), (604, 604, 3), (431, 480, 3), (1080, 768, 3), (432, 638, 3), (497, 545, 3), (243, 598, 3), (242, 600, 3), (616, 669, 3), (807, 614, 3), (1080, 717, 3), (780, 780, 3), (589, 614, 3), (780, 789, 3), (780, 607, 3), (440, 278, 3), (1080, 703, 3), (1080, 764, 3), (482, 780, 3), (1098, 720, 3), (945, 614, 3), (699, 450, 3), (300, 812, 3), (300, 823, 3), (300, 814, 3), (300, 808, 3), (805, 614, 3), (834, 614, 3), (775, 614, 3), (473, 480, 3), (1080, 700, 3), (629, 570, 3), (356, 614, 3), (480, 468, 3), (496, 747, 3), (603, 444, 3), (913, 614, 3), (692, 614, 3), (874, 614, 3), (918, 614, 
3), (865, 475, 3), (420, 295, 3), (920, 922, 3), (1080, 454, 3), (803, 614, 3), (621, 614, 3), (605, 614, 3), (625, 614, 3), (919, 614, 3), (905, 614, 3), (930, 614, 3), (211, 614, 3), (378, 827, 3), (741, 420, 3), (597, 482, 3), (647, 540, 3), (409, 614, 3), (413, 614, 3), (555, 614, 3), (881, 602, 3), (744, 660, 3), (750, 635, 3), (720, 641, 3), (612, 507, 3), (702, 616, 3), (723, 600, 3), (614, 542, 3), (406, 614, 3), (263, 614, 3), (600, 587, 3), (887, 480, 3), (884, 450, 3), (840, 461, 3), (1032, 733, 3), (690, 402, 3), (605, 672, 3), (902, 540, 3), (1267, 820, 3), (720, 467, 3), (886, 477, 3), (1216, 758, 3), (780, 699, 3), (614, 541, 3), (552, 614, 3), (667, 360, 3), (826, 600, 3), (1108, 667, 3), (582, 437, 3), (1032, 754, 3), (614, 550, 3), (600, 912, 3), (1004, 600, 3), (939, 600, 3), (407, 614, 3), (529, 614, 3), (886, 755, 3), (515, 614, 3), (570, 633, 3), (510, 676, 3), (372, 614, 3), (886, 736, 3), (582, 480, 3), (300, 1436, 3), (765, 600, 3), (432, 614, 3), (420, 857, 3), (300, 1435, 3), (548, 570, 3), (559, 614, 3), (532, 783, 3), (338, 614, 3), (480, 387, 3), (886, 744, 3), (614, 334, 3), (780, 475, 3), (667, 510, 3), (725, 600, 3), (443, 614, 3), (553, 614, 3), (562, 886, 3), (673, 566, 3), (558, 614, 3), (660, 713, 3), (801, 610, 3), (565, 780, 3), (780, 630, 3), (720, 742, 3), (627, 604, 3), (686, 994, 3), (640, 484, 3), (886, 727, 3), (480, 488, 3), (748, 540, 3), (623, 600, 3), (699, 480, 3), (244, 614, 3), (1421, 1033, 3), (791, 600, 3), (600, 840, 3), (526, 614, 3), (600, 667, 3), (567, 614, 3), (903, 751, 3), (745, 626, 3), (1249, 480, 3), (1243, 480, 3), (1089, 480, 3), (1421, 1200, 3), (614, 519, 3), (839, 540, 3), (614, 468, 3), (556, 614, 3), (704, 600, 3), (720, 623, 3), (900, 518, 3), (720, 576, 3), (529, 886, 3), (599, 456, 3), (720, 559, 3), (614, 338, 3), (679, 602, 3), (720, 631, 3), (721, 656, 3), (960, 552, 3), (904, 769, 3), (1032, 800, 3), (738, 556, 3), (695, 605, 3), (626, 480, 3), (757, 660, 3), (1514, 849, 3), (614, 339, 3), (799, 600, 3), (435, 223, 3), (448, 377, 3), (585, 458, 3), (612, 510, 3), (884, 677, 3), (437, 588, 3), (309, 480, 3), (300, 472, 3), (540, 524, 3), (527, 467, 3), (444, 600, 3), (322, 427, 3), (312, 390, 3), (462, 350, 3), (371, 406, 3), (390, 587, 3), (420, 540, 3), (436, 600, 3), (435, 600, 3), (374, 398, 3), (426, 573, 3), (381, 271, 3), (464, 345, 3), (394, 376, 3), (331, 418, 3), (540, 538, 3), (400, 600, 3), (438, 600, 3), (540, 380, 3), (540, 562, 3), (316, 434, 3), (423, 592, 3), (540, 547, 3), (457, 600, 3), (540, 435, 3), (303, 480, 3), (351, 451, 3), (284, 504, 3), (379, 600, 3), (430, 450, 3), (540, 521, 3), (407, 480, 3), (391, 389, 3), (469, 480, 3), (312, 238, 3), (390, 483, 3), (312, 402, 3), (540, 444, 3), (420, 548, 3), (431, 600, 3), (401, 403, 3), (457, 533, 3), (312, 471, 3), (540, 266, 3), (540, 595, 3), (346, 430, 3), (540, 414, 3), (312, 414, 3), (423, 600, 3), (312, 236, 3), (526, 596, 3), (450, 473, 3), (363, 422, 3), (540, 550, 3), (443, 600, 3), (346, 448, 3), (461, 514, 3), (421, 600, 3), (405, 600, 3), (540, 405, 3), (312, 229, 3), (540, 563, 3), (390, 454, 3), (408, 348, 3), (244, 600, 3), (540, 540, 3), (950, 1153, 3), (1024, 1214, 3), (747, 614, 3), (614, 768, 3), (614, 323, 3), (542, 768, 3), (733, 600, 3), (827, 995, 3), (614, 766, 3), (1167, 946, 3), (577, 780, 3), (679, 505, 3), (874, 1344, 3), (445, 614, 3), (691, 614, 3), (442, 540, 3), (895, 1260, 3), (598, 768, 3), (614, 313, 3), (361, 450, 3), (768, 590, 3), (614, 765, 3), (407, 540, 3), (419, 816, 3), (516, 450, 3), (807, 1025, 3), 
(614, 295, 3), (730, 614, 3), (614, 368, 3), (767, 977, 3), (540, 719, 3), (454, 768, 3), (931, 1214, 3), (901, 600, 3), (613, 496, 3), (596, 450, 3), (789, 1200, 3), (906, 720, 3), (742, 614, 3), (557, 614, 3), (614, 695, 3), (612, 762, 3), (1096, 900, 3), (614, 741, 3), (385, 614, 3), (461, 570, 3), (614, 429, 3), (713, 960, 3), (614, 727, 3), (768, 596, 3), (614, 625, 3), (599, 768, 3), (608, 768, 3), (292, 385, 3), (720, 527, 3), (712, 540, 3), (743, 614, 3), (292, 614, 3), (869, 600, 3), (712, 595, 3), (1220, 946, 3), (936, 717, 3), (569, 768, 3), (1079, 889, 3), (614, 692, 3), (588, 461, 3), (779, 840, 3), (949, 1200, 3), (614, 387, 3), (499, 768, 3), (618, 472, 3), (720, 896, 3), (424, 619, 3), (848, 1080, 3), (367, 768, 3), (614, 367, 3), (806, 1033, 3), (720, 502, 3), (308, 614, 3), (752, 1020, 3), (1067, 780, 3), (507, 768, 3), (1206, 720, 3), (781, 600, 3), (878, 576, 3), (614, 376, 3), (1049, 840, 3), (964, 1214, 3), (849, 1080, 3), (450, 347, 3), (728, 599, 3), (992, 1200, 3), (1205, 946, 3), (553, 768, 3), (926, 720, 3), (614, 335, 3), (926, 1190, 3), (697, 946, 3), (574, 768, 3), (780, 1199, 3), (748, 600, 3), (719, 904, 3), (461, 768, 3), (734, 600, 3), (900, 686, 3), (339, 261, 3), (434, 780, 3), (614, 700, 3), (869, 1156, 3), (772, 614, 3), (787, 956, 3), (698, 840, 3), (805, 1020, 3), (463, 768, 3), (807, 1020, 3), (383, 768, 3), (521, 768, 3), (720, 611, 3), (524, 614, 3), (614, 345, 3), (614, 624, 3), (614, 577, 3), (614, 366, 3), (461, 611, 3), (752, 600, 3), (1148, 900, 3), (767, 1020, 3), (671, 614, 3), (878, 600, 3), (779, 614, 3), (601, 768, 3), (772, 600, 3), (1137, 900, 3), (768, 595, 3), (528, 450, 3), (683, 528, 3), (403, 768, 3), (343, 768, 3), (871, 1089, 3), (452, 643, 3), (585, 768, 3), (944, 1214, 3), (356, 282, 3), (614, 308, 3), (650, 540, 3), (680, 513, 3), (681, 470, 3), (1229, 818, 3), (565, 768, 3), (928, 600, 3), (590, 768, 3), (790, 840, 3), (411, 768, 3), (785, 599, 3), (614, 342, 3), (540, 712, 3), (946, 719, 3), (614, 347, 3), (818, 1080, 3), (517, 768, 3), (1000, 1200, 3), (556, 1140, 3), (485, 768, 3), (614, 337, 3), (596, 768, 3), (829, 600, 3), (671, 960, 3), (785, 614, 3), (614, 759, 3), (768, 506, 3), (588, 450, 3), (378, 768, 3), (544, 450, 3), (1229, 583, 3), (540, 662, 3), (609, 768, 3), (680, 840, 3), (614, 728, 3), (605, 767, 3), (371, 768, 3), (614, 292, 3), (614, 361, 3), (782, 598, 3), (971, 1214, 3), (581, 469, 3), (1229, 729, 3), (900, 724, 3), (535, 418, 3), (794, 600, 3), (874, 720, 3), (330, 614, 3), (782, 614, 3), (620, 780, 3), (534, 768, 3), (767, 600, 3), (614, 355, 3), (932, 1214, 3), (957, 1214, 3), (376, 899, 3), (614, 406, 3), (545, 768, 3), (768, 569, 3), (257, 614, 3), (510, 391, 3), (540, 670, 3), (540, 708, 3), (1130, 900, 3), (754, 614, 3), (614, 357, 3), (592, 450, 3), (1201, 931, 3), (614, 551, 3), (630, 1062, 3), (614, 304, 3), (805, 596, 3), (745, 959, 3), (614, 684, 3), (352, 614, 3), (493, 768, 3), (555, 768, 3), (353, 614, 3), (884, 1214, 3), (829, 614, 3), (737, 683, 3), (614, 623, 3), (989, 780, 3), (614, 750, 3), (518, 648, 3), (631, 1080, 3), (614, 327, 3), (595, 540, 3), (567, 768, 3), (553, 767, 3), (1229, 995, 3), (521, 614, 3), (654, 540, 3), (473, 768, 3), (614, 348, 3), (1229, 848, 3), (528, 1060, 3), (704, 883, 3), (322, 614, 3), (511, 720, 3), (722, 597, 3), (284, 510, 3), (614, 611, 3), (530, 768, 3), (614, 722, 3), (686, 1196, 3), (1003, 1375, 3), (884, 614, 3), (1214, 960, 3), (874, 1296, 3), (1014, 720, 3), (358, 416, 3), (614, 762, 3), (492, 768, 3), (926, 1200, 3), (866, 1320, 3), (589, 
860, 3), (1103, 900, 3), (614, 293, 3), (546, 461, 3), (619, 750, 3), (725, 960, 3), (614, 280, 3), (1016, 1214, 3), (614, 723, 3), (608, 484, 3), (614, 289, 3), (997, 780, 3), (721, 508, 3), (498, 614, 3), (374, 767, 3), (706, 840, 3), (475, 768, 3), (1024, 840, 3), (614, 683, 3), (573, 768, 3), (382, 624, 3), (614, 296, 3), (614, 302, 3), (614, 668, 3), (950, 1214, 3), (752, 614, 3), (489, 480, 3), (345, 480, 3), (418, 420, 3), (299, 480, 3), (1220, 1801, 3), (285, 384, 3), (324, 480, 3), (516, 768, 3), (500, 384, 3), (299, 419, 3), (1343, 1800, 3), (682, 922, 3), (623, 614, 3), (765, 580, 3), (384, 614, 3), (600, 825, 3), (600, 752, 3), (684, 922, 3), (688, 919, 3), (647, 922, 3), (922, 920, 3), (1800, 1318, 3), (431, 614, 3), (797, 1178, 3), (742, 922, 3), (710, 922, 3), (622, 877, 3), (1632, 2401, 3), (600, 937, 3), (630, 635, 3), (462, 614, 3), (611, 922, 3), (645, 869, 3), (617, 922, 3), (319, 459, 3), (445, 600, 3), (437, 664, 3)]\n" ], [ "len(shapes)", "_____no_output_____" ] ] ]
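The cells above print the raw list of (height, width, channels) tuples and its length; a minimal sketch of summarizing those shapes instead of dumping them, assuming the `shapes` list from the output above (the helper name is illustrative):

```python
import numpy as np

def summarize_shapes(shapes):
    # `shapes` is assumed to be the list of (height, width, channels) tuples printed above
    heights = np.array([h for h, w, c in shapes])
    widths = np.array([w for h, w, c in shapes])
    aspect = widths / heights
    print(f"images: {len(shapes)}")
    print(f"height min/median/max: {heights.min()} / {np.median(heights):.0f} / {heights.max()}")
    print(f"width  min/median/max: {widths.min()} / {np.median(widths):.0f} / {widths.max()}")
    print(f"aspect ratio (w/h) range: {aspect.min():.2f} to {aspect.max():.2f}")

summarize_shapes(shapes)
```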
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
4a7a5362a3b673d81e15ecdfb7a3ca398f9963fe
398,279
ipynb
Jupyter Notebook
1_1_Image_Representation/4. Green Screen Car.ipynb
bwassim/CVND_Exercises
ae6ed6696626ce757b3d21bd4df2745b19187674
[ "MIT" ]
null
null
null
1_1_Image_Representation/4. Green Screen Car.ipynb
bwassim/CVND_Exercises
ae6ed6696626ce757b3d21bd4df2745b19187674
[ "MIT" ]
null
null
null
1_1_Image_Representation/4. Green Screen Car.ipynb
bwassim/CVND_Exercises
ae6ed6696626ce757b3d21bd4df2745b19187674
[ "MIT" ]
null
null
null
1,248.523511
164,572
0.960565
[ [ [ "# Color Threshold, Green Screen", "_____no_output_____" ], [ "### Import resources", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nimport numpy as np\nimport cv2\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "### Read in and display the image", "_____no_output_____" ] ], [ [ "# Read in the image\nimage = mpimg.imread('images/car_green_screen.jpg')\n\n# Print out the image dimensions (height, width, and depth (color))\nprint('Image dimensions:', image.shape)\n", "Image dimensions: (450, 660, 3)\n" ], [ "# Display the image\nplt.imshow(image)", "_____no_output_____" ] ], [ [ "### Define the color threshold", "_____no_output_____" ] ], [ [ "## TODO: Define our color selection boundaries in RGB values\nlower_green = np.array([0,230,0]) \nupper_green = np.array([60,255,60])\n", "_____no_output_____" ] ], [ [ "### Create a mask", "_____no_output_____" ] ], [ [ "# Define the masked area\nmask = cv2.inRange(image, lower_green, upper_green)\n\n# Vizualize the mask\nplt.imshow(mask, cmap='gray')", "_____no_output_____" ], [ "# Mask the image to let the car show through\nmasked_image = np.copy(image)\n\nmasked_image[mask != 0] = [0, 0, 0]\n\n# Display it!\nplt.imshow(masked_image)\n", "_____no_output_____" ] ], [ [ "### Mask and add a background image", "_____no_output_____" ] ], [ [ "# Load in a background image, and convert it to RGB \nbackground_image = mpimg.imread('images/sky.jpg')\nplt.imshow(background_image)\nb_image = np.copy(background_image)\n## TODO: Crop it or resize the background to be the right size (450x660)\ncrop_background = b_image[0:450,0:660]\n## TODO: Mask the cropped background so that the car area is blocked\ncrop_background[mask == 0] = [0,0,0]\n# Hint mask the opposite area of the previous image\n\n## TODO: Display the background and make sure \nplt.imshow(crop_background)\n", "_____no_output_____" ], [ "## TODO: Mask the cropped background so that the car area is blocked\ncrop_background[mask == 0] = [0,0,0]\n# Hint mask the opposite area of the previous image\n\n## TODO: Display the background and make sure \nplt.imshow(crop_background)", "_____no_output_____" ] ], [ [ "### Create a complete image", "_____no_output_____" ] ], [ [ "## TODO: Add the two images together to create a complete image!\n# complete_image = masked_image + crop_background\ncomplete_image = masked_image + crop_background\nplt.imshow(complete_image)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
4a7a62b63aedb60e69a8bc3e50c852a725ace28d
12,567
ipynb
Jupyter Notebook
ch1/1-5_fine_tuning.ipynb
ONground-Korea/PyTorch_Study
5c696784281c60fee120d6b67639ca072b87d41d
[ "MIT" ]
null
null
null
ch1/1-5_fine_tuning.ipynb
ONground-Korea/PyTorch_Study
5c696784281c60fee120d6b67639ca072b87d41d
[ "MIT" ]
null
null
null
ch1/1-5_fine_tuning.ipynb
ONground-Korea/PyTorch_Study
5c696784281c60fee120d6b67639ca072b87d41d
[ "MIT" ]
null
null
null
27.988864
127
0.501711
[ [ [ "import glob\nimport os.path as osp\nimport random\nimport numpy as np\nimport json\nfrom PIL import Image\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data as data\nimport torchvision\nfrom torchvision import models, transforms\n\nfrom utils.dataloader_image_classification import ImageTransform, make_datapath_list, HymenopteraDataset\n\n# 난수 시드 설정\ntorch.manual_seed(1234)\nnp.random.seed(1234)\nrandom.seed(1234)", "_____no_output_____" ] ], [ [ "### 데이터셋과 데이터 로더 작성", "_____no_output_____" ] ], [ [ "train_list = make_datapath_list('train')\nval_list = make_datapath_list('val')\n\nsize = 224\nmean = (0.485, 0.456, 0.406)\nstd = (0.229, 0.224, 0.225)\n\ntrain_dataset = HymenopteraDataset(file_list = train_list, transform = ImageTransform(size, mean, std), phase='train')\nval_dataset = HymenopteraDataset(file_list = val_list, transform = ImageTransform(size, mean, std), phase='val')\n\n# DataLoader 작성\nbatch_size = 32\n\ntrain_dataloader = data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\nval_dataloader = data.DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=False)\n\n# 사전 객체에 정리\ndataloaders_dict = {'train': train_dataloader, 'val': val_dataloader}", "./data/hymenoptera_data/train/**/*.jpg\n./data/hymenoptera_data/val/**/*.jpg\n" ] ], [ [ "### 네트워크 모델 작성", "_____no_output_____" ] ], [ [ "# VGG-16 모델의 인스턴스 생성\nuse_pretrained = True # 학습된 파라미터 사용\nnet = models.vgg16(pretrained=use_pretrained)\n\n# VGG의 마지막 출력층의 출력 유닛을 개미와 벌인 두 개로 변경 (전결합 층)\nnet.classifier[6] = nn.Linear(in_features=4096, out_features=2)\n\n# 훈련 모드로 설정\nnet.train()\n\nprint('네트워크 설정 완료: 학습된 가중치를 읽어들여 훈련 모드로 설정했습니다.')", "네트워크 설정 완료: 학습된 가중치를 읽어들여 훈련 모드로 설정했습니다.\n" ] ], [ [ "### 손실함수 정의", "_____no_output_____" ] ], [ [ "criterion = nn.CrossEntropyLoss()", "_____no_output_____" ] ], [ [ "### 최적화 기법 설정", "_____no_output_____" ] ], [ [ "# 파인튜닝으로 학습시킬 파라미터를 params_to_update 변수에 저장\nparams_to_update_1 = []\nparams_to_update_2 = []\nparams_to_update_3 = []\n\n# 학습시킬 파라미터 이름\nupdate_param_names_1 = ['features']\nupdate_param_names_2 = ['classifier.0.weight', 'classifier.0.bias', 'classifier.3.weight', 'classifier.3.bias']\nupdate_param_names_3 = ['classifier.6.weight', 'classifier.6.bias']\n\n# 파라미터를 각 리스트에 저장\nfor name, param in net.named_parameters():\n if update_param_names_1[0] in name:\n param.requires_grad = True\n params_to_update_1.append(param)\n print('params_to_update_1에 저장: ', name)\n \n elif name in update_param_names_2:\n param.requires_grad = True\n params_to_update_2.append(param)\n print('params_to_update_2에 저장: ', name)\n \n elif name in update_param_names_3:\n param.requires_grad = True\n params_to_update_3.append(param)\n print('params_to_update_3에 저장: ', name)\n \n else:\n param.requires_grad = False\n print('경사 계산 없음. 
학습하지 않음: ', name)\n ", "params_to_update_1에 저장: features.0.weight\nparams_to_update_1에 저장: features.0.bias\nparams_to_update_1에 저장: features.2.weight\nparams_to_update_1에 저장: features.2.bias\nparams_to_update_1에 저장: features.5.weight\nparams_to_update_1에 저장: features.5.bias\nparams_to_update_1에 저장: features.7.weight\nparams_to_update_1에 저장: features.7.bias\nparams_to_update_1에 저장: features.10.weight\nparams_to_update_1에 저장: features.10.bias\nparams_to_update_1에 저장: features.12.weight\nparams_to_update_1에 저장: features.12.bias\nparams_to_update_1에 저장: features.14.weight\nparams_to_update_1에 저장: features.14.bias\nparams_to_update_1에 저장: features.17.weight\nparams_to_update_1에 저장: features.17.bias\nparams_to_update_1에 저장: features.19.weight\nparams_to_update_1에 저장: features.19.bias\nparams_to_update_1에 저장: features.21.weight\nparams_to_update_1에 저장: features.21.bias\nparams_to_update_1에 저장: features.24.weight\nparams_to_update_1에 저장: features.24.bias\nparams_to_update_1에 저장: features.26.weight\nparams_to_update_1에 저장: features.26.bias\nparams_to_update_1에 저장: features.28.weight\nparams_to_update_1에 저장: features.28.bias\nparams_to_update_2에 저장: classifier.0.weight\nparams_to_update_2에 저장: classifier.0.bias\nparams_to_update_2에 저장: classifier.3.weight\nparams_to_update_2에 저장: classifier.3.bias\nparams_to_update_3에 저장: classifier.6.weight\nparams_to_update_3에 저장: classifier.6.bias\n" ], [ "# 최적화 기법 설정\noptimizer = optim.SGD([\n {'params': params_to_update_1, 'lr': 1e-4},\n {'params': params_to_update_2, 'lr': 5e-4},\n {'params': params_to_update_3, 'lr': 1e-3}\n], momentum=0.9)", "_____no_output_____" ] ], [ [ "### 학습 및 검증 실시", "_____no_output_____" ] ], [ [ "def train_model(net, dataloaders_dict, criterion, optimizer, num_epochs):\n \n # 초기 설정\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print('사용 장치: ', device)\n \n # 네트워크를 GPU로\n net.to(device)\n \n # 네트워크가 어느정도 고정되면 고속화시킨다.\n torch.backends.cudnn.benchmark = True\n \n # 에폭 루프\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch+1, num_epochs))\n print('--------------------------------')\n \n # 에폭별 학습 및 검증 루프\n for phase in ['train', 'val']:\n if phase == 'train':\n net.train()\n else:\n net.eval()\n \n epoch_loss = 0.0 # 에폭 손실 합\n epoch_corrects = 0 # 에폭 정답 수\n \n # 학습하지 않을 시 검증 성능을 확인하기 위해 epoch=0의 훈련은 생략\n if (epoch==0) and (phase=='train'):\n continue\n \n # 데이터로더로 미니 배치를 꺼내는 루프\n for inputs, labels in tqdm(dataloaders_dict[phase]):\n # GPU에 데이터를 보낸다.\n inputs = inputs.to(device)\n labels = labels.to(device)\n \n optimizer.zero_grad()\n \n # 순전파 계산\n with torch.set_grad_enabled(phase=='train'):\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n _, preds = torch.max(outputs, 1)\n \n # 훈련시에는 오차 역전파\n if phase == 'train':\n loss.backward()\n optimizer.step()\n \n # 반복 결과 계산\n # 손실 합계 갱신\n epoch_loss += loss.item()*inputs.size(0)\n # 정답 수의 합계 갱신\n epoch_corrects += torch.sum(preds==labels.data)\n \n # 에폭당 손실과 정답률 표시\n epoch_loss = epoch_loss / len(dataloaders_dict[phase].dataset)\n epoch_acc = epoch_corrects.double() / len(dataloaders_dict[phase].dataset)\n \n print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))", "_____no_output_____" ], [ "# 학습 및 검증 실시\nnum_epochs = 2\ntrain_model(net, dataloaders_dict, criterion, optimizer, num_epochs)", "사용 장치: cuda:0\nEpoch 1/2\n--------------------------------\n" ] ], [ [ "### 학습한 네트워크 저장 및 로드", "_____no_output_____" ] ] ]
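The closing heading promises saving and loading, but no code cell follows it in this record; a minimal sketch using standard PyTorch `state_dict` serialization (the file name is an arbitrary choice):

```python
# Save only the learned weights rather than the whole model object
save_path = './weights_fine_tuning.pth'
torch.save(net.state_dict(), save_path)

# Load them back into a network with the same architecture
# (map_location lets weights saved on GPU load on a CPU-only machine)
load_weights = torch.load(save_path, map_location={'cuda:0': 'cpu'})
net.load_state_dict(load_weights)
```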
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
4a7a6ae0ce56dd58cf4861b807a8ceb58eff23e9
680,436
ipynb
Jupyter Notebook
kc_housing_price.ipynb
ADDYBOY/Houses_Kc_Data_Prediction
e2e7687ba263a0932e44b27429f4debe760be6f3
[ "MIT" ]
1
2020-06-06T09:17:23.000Z
2020-06-06T09:17:23.000Z
Houses_Kc_Data_Prediction-master/kc_housing_price_SOLUTION.ipynb
abhisngh/Data-Science
c7fa9e4d81c427382fb9a9d3b97912ef2b21f3ae
[ "MIT" ]
null
null
null
Houses_Kc_Data_Prediction-master/kc_housing_price_SOLUTION.ipynb
abhisngh/Data-Science
c7fa9e4d81c427382fb9a9d3b97912ef2b21f3ae
[ "MIT" ]
1
2020-04-26T09:47:45.000Z
2020-04-26T09:47:45.000Z
1,255.416974
659,484
0.948984
[ [ [ "#importing all the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\n%matplotlib inline\n", "_____no_output_____" ], [ "#importing dataset using panda\ndataset = pd.read_csv('kc_house_data.csv')\n#to see what my dataset is comprised of\ndataset.head()", "_____no_output_____" ], [ "#checking for categorical data\nprint(dataset.dtypes)", "id int64\ndate object\nprice float64\nbedrooms int64\nbathrooms float64\nsqft_living int64\nsqft_lot int64\nfloors float64\nwaterfront int64\nview int64\ncondition int64\ngrade int64\nsqft_above int64\nsqft_basement int64\nyr_built int64\nyr_renovated int64\nzipcode int64\nlat float64\nlong float64\nsqft_living15 int64\nsqft_lot15 int64\ndtype: object\n" ], [ "#dropping the id and date column\ndataset = dataset.drop(['id','date'], axis = 1)", "_____no_output_____" ], [ "print(dataset.head())", " price bedrooms bathrooms sqft_living sqft_lot floors waterfront \\\n0 221900.0 3 1.00 1180 5650 1.0 0 \n1 538000.0 3 2.25 2570 7242 2.0 0 \n2 180000.0 2 1.00 770 10000 1.0 0 \n3 604000.0 4 3.00 1960 5000 1.0 0 \n4 510000.0 3 2.00 1680 8080 1.0 0 \n\n view condition grade sqft_above sqft_basement yr_built yr_renovated \\\n0 0 3 7 1180 0 1955 0 \n1 0 3 7 2170 400 1951 1991 \n2 0 3 6 770 0 1933 0 \n3 0 5 7 1050 910 1965 0 \n4 0 3 8 1680 0 1987 0 \n\n zipcode lat long sqft_living15 sqft_lot15 \n0 98178 47.5112 -122.257 1340 5650 \n1 98125 47.7210 -122.319 1690 7639 \n2 98028 47.7379 -122.233 2720 8062 \n3 98136 47.5208 -122.393 1360 5000 \n4 98074 47.6168 -122.045 1800 7503 \n" ], [ "#understanding the distribution with seaborn\nwith sns.plotting_context(\"notebook\",font_scale=2.5):\n g = sns.pairplot(dataset[['sqft_lot','sqft_above','price','bedrooms','sqft_living']], \n hue='bedrooms', palette='tab20',height=6)\ng.set(xticklabels=[]);", "c:\\users\\aditya yadav\\appdata\\local\\programs\\python\\python38-32\\lib\\site-packages\\seaborn\\distributions.py:288: UserWarning: Data must have variance to compute a kernel density estimate.\n warnings.warn(msg, UserWarning)\nc:\\users\\aditya yadav\\appdata\\local\\programs\\python\\python38-32\\lib\\site-packages\\seaborn\\distributions.py:288: UserWarning: Data must have variance to compute a kernel density estimate.\n warnings.warn(msg, UserWarning)\nc:\\users\\aditya yadav\\appdata\\local\\programs\\python\\python38-32\\lib\\site-packages\\seaborn\\distributions.py:288: UserWarning: Data must have variance to compute a kernel density estimate.\n warnings.warn(msg, UserWarning)\nc:\\users\\aditya yadav\\appdata\\local\\programs\\python\\python38-32\\lib\\site-packages\\seaborn\\distributions.py:288: UserWarning: Data must have variance to compute a kernel density estimate.\n warnings.warn(msg, UserWarning)\nc:\\users\\aditya yadav\\appdata\\local\\programs\\python\\python38-32\\lib\\site-packages\\seaborn\\distributions.py:288: UserWarning: Data must have variance to compute a kernel density estimate.\n warnings.warn(msg, UserWarning)\nc:\\users\\aditya yadav\\appdata\\local\\programs\\python\\python38-32\\lib\\site-packages\\seaborn\\distributions.py:288: UserWarning: Data must have variance to compute a kernel density estimate.\n warnings.warn(msg, UserWarning)\nc:\\users\\aditya yadav\\appdata\\local\\programs\\python\\python38-32\\lib\\site-packages\\seaborn\\distributions.py:288: UserWarning: Data must have variance to compute a kernel density estimate.\n warnings.warn(msg, UserWarning)\nc:\\users\\aditya 
yadav\\appdata\\local\\programs\\python\\python38-32\\lib\\site-packages\\seaborn\\distributions.py:288: UserWarning: Data must have variance to compute a kernel density estimate.\n  warnings.warn(msg, UserWarning)\n" ], [ "#separating independent and dependent variable\nX = dataset.iloc[:,1:].values\ny = dataset.iloc[:,0].values", "_____no_output_____" ], [ "print(y)", "[221900. 538000. 180000. ... 402101. 400000. 325000.]\n" ], [ "#splitting dataset into training and testing dataset\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/3, random_state = 0)", "_____no_output_____" ], [ "from sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n\n# Predicting the Test set results\ny_pred = regressor.predict(X_test)", "_____no_output_____" ], [ "#Backward elimination: repeatedly drop the predictor with the highest p-value above SL\nimport statsmodels.api as sm\ndef backwardElimination(x, SL):\n    numVars = len(x[0])\n    temp = np.zeros((21613,18)).astype(int)\n    for i in range(0, numVars):\n        # Refit on the current (shrinking) feature matrix x\n        regressor_OLS = sm.OLS(endog=y, exog=x).fit()\n        maxVar = max(regressor_OLS.pvalues).astype(float)\n        adjR_before = regressor_OLS.rsquared_adj.astype(float)\n        if maxVar > SL:\n            for j in range(0, numVars - i):\n                if (regressor_OLS.pvalues[j].astype(float) == maxVar):\n                    # Remember the dropped column in case it has to be restored\n                    temp[:,j] = x[:, j]\n                    x = np.delete(x, j, 1)\n                    tmp_regressor = sm.OLS(y, x).fit()\n                    adjR_after = tmp_regressor.rsquared_adj.astype(float)\n                    if (adjR_before >= adjR_after):\n                        # Dropping the column lowered adjusted R-squared, so roll it back and stop\n                        x_rollback = np.hstack((x, temp[:,[0,j]]))\n                        x_rollback = np.delete(x_rollback, j, 1)\n                        print(regressor_OLS.summary())\n                        return x_rollback\n                    else:\n                        continue\n    regressor_OLS.summary()\n    return x\n\nSL = 0.05\nX_opt = X[:, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]]\nX_Modeled = backwardElimination(X_opt, SL)", "                              OLS Regression Results                              \n=======================================================================================\nDep. Variable:                      y   R-squared (uncentered):                   0.905\nModel:                            OLS   Adj. R-squared (uncentered):              0.905\nMethod:                 Least Squares   F-statistic:                          1.211e+04\nDate:                Sun, 26 Apr 2020   Prob (F-statistic):                        0.00\nTime:                        05:19:37   Log-Likelihood:                     -2.9461e+05\nNo. Observations:               21613   AIC:                                  5.892e+05\nDf Residuals:                   21596   BIC:                                  5.894e+05\nDf Model:                          17\nCovariance Type:            nonrobust\n==============================================================================\n                 coef    std err          t      P>|t|      [0.025      0.975]\n------------------------------------------------------------------------------\nx1         -3.551e+04   1888.716    -18.802      0.000   -3.92e+04   -3.18e+04\nx2          4.105e+04   3253.759     12.618      0.000    3.47e+04    4.74e+04\nx3           110.2642      2.268     48.607      0.000     105.818     114.711\nx4             0.1334      0.048      2.786      0.005       0.040       0.227\nx5          5261.5471   3541.347      1.486      0.137   -1679.755    1.22e+04\nx6          5.833e+05   1.74e+04     33.598      0.000    5.49e+05    6.17e+05\nx7          5.236e+04   2128.298     24.600      0.000    4.82e+04    5.65e+04\nx8          2.721e+04   2323.818     11.709      0.000    2.27e+04    3.18e+04\nx9          9.548e+04   2145.492     44.503      0.000    9.13e+04    9.97e+04\nx10           71.3928      2.238     31.902      0.000      67.006      75.779\nx11           38.8714      2.624     14.813      0.000      33.728      44.015\nx12        -2561.7953     68.006    -37.670      0.000   -2695.092   -2428.498\nx13           20.4187      3.646      5.600      0.000      13.272      27.566\nx14         -519.0756     17.826    -29.119      0.000    -554.016    -484.136\nx15         6.022e+05   1.07e+04     56.106      0.000    5.81e+05    6.23e+05\nx16        -2.179e+05   1.31e+04    -16.683      0.000   -2.44e+05   -1.92e+05\nx17           23.0994      3.392      6.811      0.000      16.452      29.747\nx18           -0.3761      0.073     -5.137      0.000      -0.520      -0.233\n==============================================================================\nOmnibus:                    18403.146   Durbin-Watson:                   1.991\nProb(Omnibus):                  0.000   Jarque-Bera (JB):          1873534.498\nSkew:                           3.572   Prob(JB):                         0.00\nKurtosis:                      48.049   Cond. No.                     1.17e+16\n==============================================================================\n\nWarnings:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n[2] The smallest eigenvalue is 1.61e-18. This might indicate that there are\nstrong multicollinearity problems or that the design matrix is singular.\n" ] ] ]
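The notebook computes `y_pred` on the test set but never scores it; a minimal evaluation sketch, assuming the variables from the train/test split above:

```python
from sklearn.metrics import r2_score, mean_squared_error

# Compare predictions against the held-out test prices
r2 = r2_score(y_test, y_pred)
rmse = np.sqrt(mean_squared_error(y_test, y_pred))
print(f'R^2 on the test set: {r2:.3f}')
print(f'RMSE on the test set: {rmse:,.0f}')
```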
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a7a6c89478ee668ec466a6f32f21d6f58db602c
156,426
ipynb
Jupyter Notebook
Capstone Part 2a - Classical ML Models (MFCCs with Offset).ipynb
pwmin/Capstone-Project
56a25258ba7269d094f5a73b31ba067d11a9511e
[ "MIT" ]
1
2019-12-24T00:40:24.000Z
2019-12-24T00:40:24.000Z
Capstone Part 2a - Classical ML Models (MFCCs with Offset).ipynb
pwmin/Capstone-Project
56a25258ba7269d094f5a73b31ba067d11a9511e
[ "MIT" ]
null
null
null
Capstone Part 2a - Classical ML Models (MFCCs with Offset).ipynb
pwmin/Capstone-Project
56a25258ba7269d094f5a73b31ba067d11a9511e
[ "MIT" ]
null
null
null
46.320995
16,028
0.580901
[ [ [ "# Capstone Part 2a - Classical ML Models (MFCCs with Offset)\n___\n## Setup", "_____no_output_____" ] ], [ [ "# Basic packages\nimport numpy as np\nimport pandas as pd\n\n# For splitting the data into training and test sets\nfrom sklearn.model_selection import train_test_split\n\n# For scaling the data as necessary\nfrom sklearn.preprocessing import StandardScaler\n\n# For doing principal component analysis as necessary\nfrom sklearn.decomposition import PCA\n\n# For visualizations\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n# For building a variety of models\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom xgboost import XGBClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\n\n# For hyperparameter optimization\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import GridSearchCV\n\n# For caching pipeline and grid search results\nfrom tempfile import mkdtemp\n\n# For model evaluation\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\n\n# For getting rid of warning messages\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# For pickling models\nimport joblib\n\n# Loading in the finished dataframe from part 1\nravdess_mfcc_df = pd.read_csv('C:/Users/Patrick/Documents/Capstone Data/ravdess_mfcc.csv')", "_____no_output_____" ] ], [ [ "___\n# Building Models for Classifying Gender (Regardless of Emotion)", "_____no_output_____" ] ], [ [ "# Splitting the dataframe into features and target\nX = ravdess_mfcc_df.iloc[:, :-2]\ng = ravdess_mfcc_df['Gender']", "_____no_output_____" ] ], [ [ "The convention is to name the target variable 'y', but I will be declaring many different target variables throughout the notebook, so I opted for 'g' for simplicity instead of 'y_g' or 'y_gen', for example.", "_____no_output_____" ] ], [ [ "# # Encoding the genders\n# gender_encoder = LabelEncoder()\n# g = gender_encoder.fit_transform(g)\n\n# # Checking the results\n# g", "_____no_output_____" ], [ "# # Which number represents which gender?\n# for num in np.unique(g):\n# print(f'{num} represents {gender_encoder.inverse_transform([num])[0]}.')", "_____no_output_____" ] ], [ [ "Note: I realized that encoding the target is unnecessary; it is done automatically by the models.", "_____no_output_____" ] ], [ [ "# What test size should I use?\nprint(f'Length of g: {len(g)}')\nprint(f'30% of {len(g)} is {len(g)*0.3}')", "Length of g: 1438\n30% of 1438 is 431.4\n" ] ], [ [ "I will use 30%.", "_____no_output_____" ] ], [ [ "# Splitting the data into training and test sets\nX_train, X_test, g_train, g_test = train_test_split(X, g, test_size=0.3, stratify=g, random_state=1)", "_____no_output_____" ], [ "# Checking the shapes\nprint(X_train.shape)\nprint(X_test.shape)\nprint(g_train.shape)\nprint(g_test.shape)", "(1006, 2600)\n(432, 2600)\n(1006,)\n(432,)\n" ] ], [ [ "I want to build a simple, initial classifier to get a sense of the performances I might get in more optimized models. 
To this end, I will build a logistic regression model without doing any cross-validation or hyperparameter optimization.", "_____no_output_____" ] ], [ [ "# Instantiate the model\ninitial_logreg = LogisticRegression()\n\n# Fit to training set\ninitial_logreg.fit(X_train, g_train)\n\n# Score on training set\nprint(f'Model accuracy on training set: {initial_logreg.score(X_train, g_train)*100}%')\n\n# Score on test set\nprint(f'Model accuracy on test set: {initial_logreg.score(X_test, g_test)*100}%')", "Model accuracy on training set: 100.0%\nModel accuracy on test set: 99.07407407407408%\n" ] ], [ [ "These are extremely high accuracies. The model has most likely overfit to the training set, but the accuracy on the test set is still surprisingly high.\n\nHere are some possible explanations:\n- The dataset (RAVDESS) is relatively small, with only 1440 data points (1438 if I do not count the two very short clips that I excluded). This model is likely not very robust and has easily overfit to the training set.\n- The features I have extracted could be excellent predictors of gender.\n- This could be a very simple classification task. After all, there are only two classes, and theoretically, features extracted from male and female voice clips should have distinguishable patterns.\n\nI had originally planned to build more gender classification models for this dataset, but I will forgo this for now. In part 4, I will try using this model to classify clips from another dataset and examine its performance.", "_____no_output_____" ] ], [ [ "# Pickling the model for later use\njoblib.dump(initial_logreg, 'pickle1_gender_logreg.pkl') ", "_____no_output_____" ] ], [ [ "___\n# Building Models for Classifying Emotion for Males", "_____no_output_____" ] ], [ [ "# Making a new dataframe that contains only male recordings\nravdess_mfcc_m_df = ravdess_mfcc_df[ravdess_mfcc_df['Gender'] == 'male'].reset_index().drop('index', axis=1)\nravdess_mfcc_m_df", "_____no_output_____" ], [ "# Splitting the dataframe into features and target\nXm = ravdess_mfcc_m_df.iloc[:, :-2]\nem = ravdess_mfcc_m_df['Emotion']", "_____no_output_____" ], [ "# # Encoding the emotions\n# emotion_encoder = LabelEncoder()\n# em = emotion_encoder.fit_transform(em)\n\n# # Checking the results\n# em", "_____no_output_____" ], [ "# # Which number represents which emotion?\n# for num in np.unique(em):\n# print(f'{num} represents {emotion_encoder.inverse_transform([num])[0]}.')", "_____no_output_____" ] ], [ [ "Note: I realized that encoding the target is unnecessary; it is done automatically by the models.", "_____no_output_____" ] ], [ [ "# Splitting the data into training and test sets\nXm_train, Xm_test, em_train, em_test = train_test_split(Xm, em, test_size=0.3, stratify=em, random_state=1)", "_____no_output_____" ], [ "# Checking the shapes\nprint(Xm_train.shape)\nprint(Xm_test.shape)\nprint(em_train.shape)\nprint(em_test.shape)", "(502, 2600)\n(216, 2600)\n(502,)\n(216,)\n" ] ], [ [ "As before, I will try building an initial model.", "_____no_output_____" ] ], [ [ "# Instantiate the model\ninitial_logreg_em = LogisticRegression()\n\n# Fit to training set\ninitial_logreg_em.fit(Xm_train, em_train)\n\n# Score on training set\nprint(f'Model accuracy on training set: {initial_logreg_em.score(Xm_train, em_train)*100}%')\n\n# Score on test set\nprint(f'Model accuracy on test set: {initial_logreg_em.score(Xm_test, em_test)*100}%')", "Model accuracy on training set: 100.0%\nModel accuracy on test set: 56.481481481481474%\n" ] ], [ [ "The model has 
overfit to the training set yet again, and this time the accuracy on the test set leaves a lot to be desired. Let's evaluate the model further using a confusion matrix and a classification report.", "_____no_output_____" ] ], [ [ "# Having initial_logreg_em make predictions based on the test set features\nem_pred = initial_logreg_em.predict(Xm_test)\n\n# Building the confusion matrix as a dataframe\nemotions = ['angry', 'calm', 'disgusted', 'fearful', 'happy', 'neutral', 'sad', 'surprised']\nem_confusion_df = pd.DataFrame(confusion_matrix(em_test, em_pred))\nem_confusion_df.columns = [f'Predicted {emotion}' for emotion in emotions]\nem_confusion_df.index = [f'Actual {emotion}' for emotion in emotions]\nem_confusion_df", "_____no_output_____" ], [ "# Classification report\nprint(classification_report(em_test, em_pred))", " precision recall f1-score support\n\n angry 0.62 0.83 0.71 29\n calm 0.67 0.93 0.78 28\n disgusted 0.62 0.45 0.52 29\n fearful 0.62 0.69 0.66 29\n happy 0.42 0.45 0.43 29\n neutral 0.31 0.36 0.33 14\n sad 0.50 0.34 0.41 29\n surprised 0.61 0.38 0.47 29\n\n accuracy 0.56 216\n macro avg 0.55 0.55 0.54 216\nweighted avg 0.56 0.56 0.55 216\n\n" ] ], [ [ "In a binary classification problem, there is one negative class and one positive class. This is not the case here, because this is a multiclass classification problem. In the table above, each row of precision and recall scores assumes the corresponding emotion is the positive class, and groups all other emotions as the negative class.\n\nPrecision is the following measure: Of all the data points that the model classified as belonging to the positive class (i.e., the true and false positives), what proportion is correct (i.e., truly positive)?\n\nRecall is the following measure: Of all the data points that are truly positive (i.e., the true positives and false negatives as classified by the model), what proportion did the model correctly classify (i.e., the true positives)?\n\nIt appears that the initial model is strongest at classifying calm voice clips, and weakest at classifying neutral voice clips. In order of strongest to weakest: calm, angry, fearful, disgusted, surprised, happy, sad, and neutral.", "_____no_output_____" ], [ "I will now try building new models and optimizing hyperparameters to obtain better performance. I will use a pipeline and multiple grid searches to accomplish this.\n\nBefore I build all my models in bulk, I want to see if doing principal component analysis (PCA) could be beneficial. I will do PCA on both unscaled and scaled features, and plot the resulting explained variance ratios. 
I have two goals here:\n- Get a sense of whether scaling would be beneficial for model performance\n- Get a sense of how many principal components I should use", "_____no_output_____" ] ], [ [ "# PCA on unscaled features\n\n# Instantiate PCA and fit to Xm_train\npca = PCA().fit(Xm_train)\n\n# Transform Xm_train\nXm_train_pca = pca.transform(Xm_train)\n\n# Transform Xm_test\nXm_test_pca = pca.transform(Xm_test)", "_____no_output_____" ], [ "# Standard scaling\n\n# Instantiate the scaler and fit to Xm_train\nscaler = StandardScaler().fit(Xm_train)\n\n# Transform Xm_train\nXm_train_scaled = scaler.transform(Xm_train)\n\n# Transform Xm_test\nXm_test_scaled = scaler.transform(Xm_test)", "_____no_output_____" ], [ "# PCA on scaled features\n\n# Instantiate PCA and fit to Xm_train_scaled\npca_scaled = PCA().fit(Xm_train_scaled)\n\n# Transform Xm_train_scaled\nXm_train_scaled_pca = pca_scaled.transform(Xm_train_scaled)\n\n# Transform Xm_test_scaled\nXm_test_scaled_pca = pca_scaled.transform(Xm_test_scaled)", "_____no_output_____" ], [ "# Plot the explained variance ratios\n\nplt.subplots(1, 2, figsize = (15, 5))\n\n# Unscaled\nplt.subplot(1, 2, 1)\nplt.bar(np.arange(1, len(pca.explained_variance_ratio_)+1), pca.explained_variance_ratio_)\nplt.xlabel('Principal Component')\nplt.ylabel('Explained Variance Ratio')\nplt.title('PCA on Unscaled Features')\nplt.ylim(top = 0.5) # Equalizing the y-axes\n\n# Scaled\nplt.subplot(1, 2, 2)\nplt.bar(np.arange(1, len(pca_scaled.explained_variance_ratio_)+1), pca_scaled.explained_variance_ratio_)\nplt.xlabel('Principal Component')\nplt.ylabel('Explained Variance Ratio')\nplt.title('PCA on Scaled Features')\nplt.ylim(top = 0.5) # Equalizing the y-axes\n\nplt.tight_layout()\nplt.show()", "_____no_output_____" ] ], [ [ "Principal components are linear combinations of the original features, ordered by how much of the dataset's variance they explain. Looking at the two plots above, it appears that for the same number of principal components, those using unscaled features are able to explain more variance (i.e., capture more information) than those using scaled features. For example, looking at the first ~25 principal components of each plot, the bars of the left plot (unscaled) are higher and skewed more to the left than those of the right plot (scaled). Since the purpose of PCA is to reduce dimensionality of the data by keeping the components that explain the most variance and discarding the rest, the unscaled principal components might benefit my models more than the scaled principal components will.\n\nHowever, I have to be mindful of the underlying variance in my features. Some features have values in the -800s, while others are close to 0.", "_____no_output_____" ] ], [ [ "# Examining the variances\nvar_df = pd.DataFrame(ravdess_mfcc_m_df.var()).T\nvar_df", "_____no_output_____" ] ], [ [ "Since PCA is looking for high variance directions, it can become biased by the underlying variance in a given feature if I do not scale it down first. I can see that some features have much higher variance than others do, so there is likely a lot of bias in the unscaled principal components above.\n\nHow much variance is explained by certain numbers of unscaled and scaled principal components? 
This will help me determine how many principal components to try in my grid searches later.", "_____no_output_____" ] ], [ [ "# Unscaled\nnum_components = [503, 451, 401, 351, 301, 251, 201, 151, 101, 51]\nfor n in num_components:\n print(f'Variance explained by {n-1} unscaled principal components: {np.round(np.sum(pca.explained_variance_ratio_[:n])*100, 2)}%')", "Variance explained by 502 unscaled principal components: 100.0%\nVariance explained by 450 unscaled principal components: 99.94%\nVariance explained by 400 unscaled principal components: 99.84%\nVariance explained by 350 unscaled principal components: 99.69%\nVariance explained by 300 unscaled principal components: 99.47%\nVariance explained by 250 unscaled principal components: 99.14%\nVariance explained by 200 unscaled principal components: 98.63%\nVariance explained by 150 unscaled principal components: 97.84%\nVariance explained by 100 unscaled principal components: 96.43%\nVariance explained by 50 unscaled principal components: 93.15%\n" ], [ "# Scaled\nnum_components = [503, 451, 401, 351, 301, 251, 201, 151, 101, 51]\nfor n in num_components:\n print(f'Variance explained by {n-1} scaled principal components: {np.round(np.sum(pca_scaled.explained_variance_ratio_[:n])*100, 2)}%')", "Variance explained by 502 scaled principal components: 100.0%\nVariance explained by 450 scaled principal components: 99.52%\nVariance explained by 400 scaled principal components: 98.71%\nVariance explained by 350 scaled principal components: 97.51%\nVariance explained by 300 scaled principal components: 95.79%\nVariance explained by 250 scaled principal components: 93.36%\nVariance explained by 200 scaled principal components: 89.86%\nVariance explained by 150 scaled principal components: 84.76%\nVariance explained by 100 scaled principal components: 76.91%\nVariance explained by 50 scaled principal components: 63.35%\n" ] ], [ [ "I will now build a pipeline and multiple grid searches with five-fold cross-validation to optimize the hyperparameters. I will try five types of classifiers: logistic regression, support vector machine, random forest, XGBoost, and k-nearest neighbours. To get a better sense of how each type performs, I will make a grid search for each one. 
I will also try different numbers of principal components for unscaled and scaled features.", "_____no_output_____" ] ], [ [ "# Cache\ncachedir = mkdtemp()\n\n# Pipeline (these values are placeholders)\nmy_pipeline = Pipeline(steps=[('scaler', StandardScaler()), ('dim_reducer', PCA()), ('model', LogisticRegression())], memory=cachedir)", "_____no_output_____" ], [ "# Parameter grid for log reg\nlogreg_param_grid = [\n # l1 without PCA\n # unscaled and scaled * 9 regularization strengths = 18 models\n {'scaler': [None, StandardScaler()], 'dim_reducer': [None], 'model': [LogisticRegression(penalty='l1', n_jobs=-1)],\n 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]},\n \n # l1 unscaled with PCA\n # 5 PCAs * 9 regularization strengths = 45 models\n {'scaler': [None], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(50, 251, 50),\n 'model': [LogisticRegression(penalty='l1', n_jobs=-1)], 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]},\n \n # l1 scaled with PCA\n # 4 PCAs * 9 regularization strengths = 36 models\n {'scaler': [StandardScaler()], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(200, 351, 50),\n 'model': [LogisticRegression(penalty='l1', n_jobs=-1)], 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]},\n \n # l2 (default) without PCA\n # unscaled and scaled * 9 regularization strengths = 18 models\n {'scaler': [None, StandardScaler()], 'dim_reducer': [None], 'model': [LogisticRegression(solver='lbfgs', n_jobs=-1)],\n 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]},\n \n # l2 (default) unscaled with PCA\n # 5 PCAs * 9 regularization strengths = 45 models\n {'scaler': [None], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(50, 251, 50),\n 'model': [LogisticRegression(solver='lbfgs', n_jobs=-1)], 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]},\n \n # l2 (default) scaled with PCA\n # 4 PCAs * 9 regularization strengths = 36 models\n {'scaler': [StandardScaler()], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(200, 351, 50),\n 'model': [LogisticRegression(solver='lbfgs', n_jobs=-1)], 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]}\n]\n\n# Instantiate the log reg grid search\nlogreg_grid_search = GridSearchCV(estimator=my_pipeline, param_grid=logreg_param_grid, cv=5, n_jobs=-1, verbose=5)", "_____no_output_____" ], [ "# Fit the log reg grid search\nfitted_logreg_grid_em = logreg_grid_search.fit(Xm_train, em_train)", "Fitting 5 folds for each of 198 candidates, totalling 990 fits\n" ], [ "# What was the best log reg?\nfitted_logreg_grid_em.best_estimator_", "_____no_output_____" ], [ "print(f\"The best log reg's accuracy on the training set: {fitted_logreg_grid_em.score(Xm_train, em_train)*100}%\")\nprint(f\"The best log reg's accuracy on the test set: {fitted_logreg_grid_em.score(Xm_test, em_test)*100}%\")", "The best log reg's accuracy on the training set: 100.0%\nThe best log reg's accuracy on the test set: 58.79629629629629%\n" ], [ "# Pickling the best log reg for later use\njoblib.dump(fitted_logreg_grid_em.best_estimator_, 'pickle2_male_emotion_logreg.pkl')", "_____no_output_____" ], [ "# Parameter grid for SVM\nsvm_param_grid = [\n # unscaled and scaled * 9 regularization strengths = 18 models\n {'scaler': [None, StandardScaler()], 'dim_reducer': [None], 'model': [SVC()], 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]},\n \n # unscaled\n # 5 PCAs * 9 regularization strengths = 45 models\n {'scaler': [None], 
'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(50, 251, 50), 'model': [SVC()],\n 'model__C':[0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]},\n \n # scaled\n # 4 PCAs * 9 regularization strengths = 36 models\n {'scaler': [StandardScaler()], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(200, 351, 50), 'model': [SVC()],\n 'model__C':[0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]}\n]\n\n# Instantiate the SVM grid search\nsvm_grid_search = GridSearchCV(estimator=my_pipeline, param_grid=svm_param_grid, cv=5, n_jobs=-1, verbose=5)", "_____no_output_____" ], [ "# Fit the SVM grid search\nfitted_svm_grid_em = svm_grid_search.fit(Xm_train, em_train)", "Fitting 5 folds for each of 99 candidates, totalling 495 fits\n" ], [ "# What was the best SVM?\nfitted_svm_grid_em.best_estimator_", "_____no_output_____" ], [ "print(f\"The best SVM's accuracy on the training set: {fitted_svm_grid_em.score(Xm_train, em_train)*100}%\")\nprint(f\"The best SVM's accuracy on the test set: {fitted_svm_grid_em.score(Xm_test, em_test)*100}%\")", "The best SVM's accuracy on the training set: 100.0%\nThe best SVM's accuracy on the test set: 60.18518518518518%\n" ], [ "# Pickling the best SVM for later use\njoblib.dump(fitted_svm_grid_em.best_estimator_, 'pickle3_male_emotion_svm.pkl')", "_____no_output_____" ], [ "# Parameter grid for random forest (scaling is unnecessary)\nrf_param_grid = [\n # 5 numbers of estimators * 5 max depths = 25 models\n {'scaler': [None], 'dim_reducer': [None], 'model': [RandomForestClassifier(n_jobs=-1)], 'model__n_estimators': np.arange(100, 501, 100),\n 'model__max_depth': np.arange(5, 26, 5)},\n \n # 5 PCAs * 5 numbers of estimators * 5 max depths = 150 models\n {'scaler': [None], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(50, 251, 50), 'model': [RandomForestClassifier(n_jobs=-1)],\n 'model__n_estimators': np.arange(100, 501, 100), 'model__max_depth': np.arange(5, 26, 5)}\n]\n\n# Instantiate the rf grid search\nrf_grid_search = GridSearchCV(estimator=my_pipeline, param_grid=rf_param_grid, cv=5, n_jobs=-1, verbose=5)", "_____no_output_____" ], [ "# Fit the rf grid search\nfitted_rf_grid_em = rf_grid_search.fit(Xm_train, em_train)", "Fitting 5 folds for each of 150 candidates, totalling 750 fits\n" ], [ "# What was the best rf?\nfitted_rf_grid_em.best_estimator_", "_____no_output_____" ], [ "print(f\"The best random forest's accuracy on the training set: {fitted_rf_grid_em.score(Xm_train, em_train)*100}%\")\nprint(f\"The best random forest's accuracy on the test set: {fitted_rf_grid_em.score(Xm_test, em_test)*100}%\")", "The best random forest's accuracy on the training set: 100.0%\nThe best random forest's accuracy on the test set: 46.75925925925926%\n" ], [ "# # Parameter grid for XGBoost (scaling is unnecessary)\n# xgb_param_grid = [\n# # 5 numbers of estimators * 5 max depths = 25 models\n# {'scaler': [None], 'dim_reducer': [None], 'model': [XGBClassifier(n_jobs=-1)], 'model__n_estimators': np.arange(100, 501, 100),\n# 'model__max_depth': np.arange(5, 26, 5)},\n \n# # 3 PCAs * 5 numbers of estimators * 5 max depths = 75 models\n# # I am trying fewer PCAs for XGBoost\n# {'scaler': [None], 'dim_reducer': [PCA()], 'dim_reducer__n_components': [200, 250, 300], 'model': [XGBClassifier(n_jobs=-1)],\n# 'model__n_estimators': np.arange(100, 501, 100), 'model__max_depth': np.arange(5, 26, 5)}\n# ]\n\n# # Instantiate the XGB grid search\n# xgb_grid_search = GridSearchCV(estimator=my_pipeline, param_grid=xgb_param_grid, cv=5, n_jobs=-1, 
verbose=5)", "_____no_output_____" ], [ "# # Fit the XGB grid search\n# fitted_xgb_grid_em = xgb_grid_search.fit(Xm_train, em_train)", "Fitting 5 folds for each of 100 candidates, totalling 500 fits\n" ] ], [ [ "The above never finished so I decided to comment it out. I will try again without passing `n_jobs=-1` into `XGBClassifier()`, and with a higher number (10 instead of 5) for `verbose` in `GridSearchCV()`.", "_____no_output_____" ] ], [ [ "# Parameter grid for XGBoost (scaling is unnecessary)\nxgb_param_grid = [\n # 5 numbers of estimators * 5 max depths = 25 models\n {'scaler': [None], 'dim_reducer': [None], 'model': [XGBClassifier()], 'model__n_estimators': np.arange(100, 501, 100),\n 'model__max_depth': np.arange(5, 26, 5)},\n \n # 3 PCAs * 5 numbers of estimators * 5 max depths = 75 models\n # I am trying fewer PCAs for XGBoost\n {'scaler': [None], 'dim_reducer': [PCA()], 'dim_reducer__n_components': [200, 250, 300], 'model': [XGBClassifier()],\n 'model__n_estimators': np.arange(100, 501, 100), 'model__max_depth': np.arange(5, 26, 5)}\n]\n\n# Instantiate the XGB grid search\nxgb_grid_search = GridSearchCV(estimator=my_pipeline, param_grid=xgb_param_grid, cv=5, n_jobs=-1, verbose=10)", "_____no_output_____" ], [ "# Fit the XGB grid search\nfitted_xgb_grid_em = xgb_grid_search.fit(Xm_train, em_train)", "Fitting 5 folds for each of 100 candidates, totalling 500 fits\n" ], [ "# What was the best XGB model?\nfitted_xgb_grid_em.best_estimator_", "_____no_output_____" ], [ "print(f\"The best XGB model's accuracy on the training set: {fitted_xgb_grid_em.score(Xm_train, em_train)*100}%\")\nprint(f\"The best XGB model's accuracy on the test set: {fitted_xgb_grid_em.score(Xm_test, em_test)*100}%\")", "The best XGB model's accuracy on the training set: 100.0%\nThe best XGB model's accuracy on the test set: 51.388888888888886%\n" ], [ "# Parameter grid for KNN\nknn_param_grid = [\n # unscaled and scaled * 10 Ks = 20 models\n {'scaler': [None, StandardScaler()], 'dim_reducer': [None], 'model': [KNeighborsClassifier(n_jobs=-1)], 'model__n_neighbors': np.arange(3, 22, 2)},\n \n # unscaled\n # 5 PCAs * 10 Ks = 50 models\n {'scaler': [None], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(50, 251, 50), 'model': [KNeighborsClassifier(n_jobs=-1)],\n 'model__n_neighbors': np.arange(3, 22, 2)},\n \n # scaled\n # 4 PCAs * 10 Ks = 40 models\n {'scaler': [StandardScaler()], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(200, 351, 50), 'model': [KNeighborsClassifier(n_jobs=-1)],\n 'model__n_neighbors': np.arange(3, 22, 2)}\n]\n\n# Instantiate the grid search\nknn_grid_search = GridSearchCV(estimator=my_pipeline, param_grid=knn_param_grid, cv=5, n_jobs=-1, verbose=5)", "_____no_output_____" ], [ "# Fit the KNN grid search\nfitted_knn_grid_em = knn_grid_search.fit(Xm_train, em_train)", "Fitting 5 folds for each of 110 candidates, totalling 550 fits\n" ], [ "# What was the best KNN model?\nfitted_knn_grid_em.best_estimator_", "_____no_output_____" ], [ "print(f\"The best KNN model's accuracy on the training set: {fitted_knn_grid_em.score(Xm_train, em_train)*100}%\")\nprint(f\"The best KNN model's accuracy on the test set: {fitted_knn_grid_em.score(Xm_test, em_test)*100}%\")", "The best KNN model's accuracy on the training set: 76.29482071713147%\nThe best KNN model's accuracy on the test set: 45.370370370370374%\n" ] ], [ [ "### Conclusions for classifying emotions for males\n- Of the five classifier types I tried in my grid searches, SVM had the highest accuracy on the test 
set (60.19%), followed by logistic regression (58.80%), XGBoost (51.39%), random forest (46.76%), and lastly, KNN (45.37%).\n - Based on these results, I have pickled the best SVM and logistic regression. In part 4, I will try them on a new, male-only dataset.\n- Except for the best KNN model, all the best models found in the grid searches had training accuracies of 100%, indicating that they overfit to the training set.\n - The best KNN model had a training accuracy of 76.29%, but this was still much higher than its test accuracy of 45.37%.\n- For the classifier types in which scaling the features matters (logistic regression, SVM, and KNN), all the best models made use of the standard scaler.\n- Of the five best-in-type models, random forest and KNN were the only two which made use of principal components.", "_____no_output_____" ], [ "___\n# Building Models for Classifying Emotion for Females\nI will follow the same steps I took in classifying emotions for males, with one difference: This time I will not try XGBoost, due to its long computation time and comparatively low performance.", "_____no_output_____" ] ], [ [ "# Making a new dataframe that contains only female recordings\nravdess_mfcc_f_df = ravdess_mfcc_df[ravdess_mfcc_df['Gender'] == 'female'].reset_index().drop('index', axis=1)\nravdess_mfcc_f_df", "_____no_output_____" ], [ "# Splitting the dataframe into features and target\nXf = ravdess_mfcc_f_df.iloc[:, :-2]\nef = ravdess_mfcc_f_df['Emotion']", "_____no_output_____" ], [ "# Splitting the data into training and test sets\nXf_train, Xf_test, ef_train, ef_test = train_test_split(Xf, ef, test_size=0.3, stratify=ef, random_state=1)", "_____no_output_____" ], [ "# Checking the shapes\nprint(Xf_train.shape)\nprint(Xf_test.shape)\nprint(ef_train.shape)\nprint(ef_test.shape)", "(504, 2600)\n(216, 2600)\n(504,)\n(216,)\n" ] ], [ [ "Here is an initial model:", "_____no_output_____" ] ], [ [ "# Instantiate the model\ninitial_logreg_ef = LogisticRegression()\n\n# Fit to training set\ninitial_logreg_ef.fit(Xf_train, ef_train)\n\n# Score on training set\nprint(f'Model accuracy on training set: {initial_logreg_ef.score(Xf_train, ef_train)*100}%')\n\n# Score on test set\nprint(f'Model accuracy on test set: {initial_logreg_ef.score(Xf_test, ef_test)*100}%')", "Model accuracy on training set: 100.0%\nModel accuracy on test set: 68.98148148148148%\n" ] ], [ [ "The model has overfit to the training set yet again. Interestingly, this initial accuracy on the female test set is noticeably higher than the initial accuracy on the male test set, which was 56.48%. 
Again, let's evaluate the model further using a confusion matrix and a classification report.", "_____no_output_____" ] ], [ [ "# Having initial_logreg_ef make predictions based on the test set features\nef_pred = initial_logreg_ef.predict(Xf_test)\n\n# Building the confusion matrix as a dataframe\nemotions = ['angry', 'calm', 'disgusted', 'fearful', 'happy', 'neutral', 'sad', 'surprised']\nef_confusion_df = pd.DataFrame(confusion_matrix(ef_test, ef_pred))\nef_confusion_df.columns = [f'Predicted {emotion}' for emotion in emotions]\nef_confusion_df.index = [f'Actual {emotion}' for emotion in emotions]\nef_confusion_df", "_____no_output_____" ], [ "# Classification report\nprint(classification_report(ef_test, ef_pred))", " precision recall f1-score support\n\n angry 0.74 0.69 0.71 29\n calm 0.76 0.76 0.76 29\n disgusted 0.76 0.66 0.70 29\n fearful 0.55 0.62 0.58 29\n happy 0.71 0.76 0.73 29\n neutral 0.90 0.64 0.75 14\n sad 0.56 0.66 0.60 29\n surprised 0.74 0.71 0.73 28\n\n accuracy 0.69 216\n macro avg 0.71 0.69 0.70 216\nweighted avg 0.70 0.69 0.69 216\n\n" ] ], [ [ "It appears that the initial model is strongest at classifying calm voice clips, and weakest at classifying fearful voice clips. In order of strongest to weakest: calm, neutral, happy, surprised, angry, disgusted, sad, and fearful.\n\nThere is not as much variance in performance across the emotions when compared to that of the initial model for male emotions.", "_____no_output_____" ], [ "Although I found that none of the best male emotion classifiers made use of PCA, I will still examine the explained variance ratios like I did before.", "_____no_output_____" ] ], [ [ "# PCA on unscaled features\n\n# Instantiate PCA and fit to Xf_train\npca = PCA().fit(Xf_train)\n\n# Transform Xf_train\nXf_train_pca = pca.transform(Xf_train)\n\n# Transform Xf_test\nXf_test_pca = pca.transform(Xf_test)", "_____no_output_____" ], [ "# Standard scaling\n\n# Instantiate the scaler and fit to Xf_train\nscaler = StandardScaler().fit(Xf_train)\n\n# Transform Xf_train\nXf_train_scaled = scaler.transform(Xf_train)\n\n# Transform Xf_test\nXf_test_scaled = scaler.transform(Xf_test)", "_____no_output_____" ], [ "# PCA on scaled features\n\n# Instantiate PCA and fit to Xf_train_scaled\npca_scaled = PCA().fit(Xf_train_scaled)\n\n# Transform Xf_train_scaled\nXf_train_scaled_pca = pca_scaled.transform(Xf_train_scaled)\n\n# Transform Xf_test_scaled\nXf_test_scaled_pca = pca_scaled.transform(Xf_test_scaled)", "_____no_output_____" ], [ "# Plot the explained variance ratios\n\nplt.subplots(1, 2, figsize = (15, 5))\n\n# Unscaled\nplt.subplot(1, 2, 1)\nplt.bar(np.arange(1, len(pca.explained_variance_ratio_)+1), pca.explained_variance_ratio_)\nplt.xlabel('Principal Component')\nplt.ylabel('Explained Variance Ratio')\nplt.title('PCA on Unscaled Features')\nplt.ylim(top = 0.5) # Equalizing the y-axes\n\n# Scaled\nplt.subplot(1, 2, 2)\nplt.bar(np.arange(1, len(pca_scaled.explained_variance_ratio_)+1), pca_scaled.explained_variance_ratio_)\nplt.xlabel('Principal Component')\nplt.ylabel('Explained Variance Ratio')\nplt.title('PCA on Scaled Features')\nplt.ylim(top = 0.5) # Equalizing the y-axes\n\nplt.tight_layout()\nplt.show()", "_____no_output_____" ] ], [ [ "These are the same trends I saw previously for male emotions.\n\nHow much variance is explained by certain numbers of unscaled and scaled principal components? 
This will help me determine how many principal components to try in my grid searches later.", "_____no_output_____" ] ], [ [ "# Unscaled\nnum_components = [503, 451, 401, 351, 301, 251, 201, 151, 101, 51]\nfor n in num_components:\n print(f'Variance explained by {n-1} unscaled principal components: {np.round(np.sum(pca.explained_variance_ratio_[:n])*100, 2)}%')", "Variance explained by 502 unscaled principal components: 100.0%\nVariance explained by 450 unscaled principal components: 99.91%\nVariance explained by 400 unscaled principal components: 99.77%\nVariance explained by 350 unscaled principal components: 99.56%\nVariance explained by 300 unscaled principal components: 99.25%\nVariance explained by 250 unscaled principal components: 98.77%\nVariance explained by 200 unscaled principal components: 98.05%\nVariance explained by 150 unscaled principal components: 96.91%\nVariance explained by 100 unscaled principal components: 94.95%\nVariance explained by 50 unscaled principal components: 90.6%\n" ], [ "# Scaled\nnum_components = [503, 451, 401, 351, 301, 251, 201, 151, 101, 51]\nfor n in num_components:\n print(f'Variance explained by {n-1} scaled principal components: {np.round(np.sum(pca_scaled.explained_variance_ratio_[:n])*100, 2)}%')", "Variance explained by 502 scaled principal components: 100.0%\nVariance explained by 450 scaled principal components: 99.5%\nVariance explained by 400 scaled principal components: 98.7%\nVariance explained by 350 scaled principal components: 97.51%\nVariance explained by 300 scaled principal components: 95.82%\nVariance explained by 250 scaled principal components: 93.39%\nVariance explained by 200 scaled principal components: 89.88%\nVariance explained by 150 scaled principal components: 84.69%\nVariance explained by 100 scaled principal components: 76.61%\nVariance explained by 50 scaled principal components: 62.4%\n" ] ], [ [ "Like before, I will now do a grid search for each classifier type, with five-fold cross-validation to optimize the hyperparameters.", "_____no_output_____" ] ], [ [ "# Cache\ncachedir = mkdtemp()\n\n# Pipeline (these values are placeholders)\nmy_pipeline = Pipeline(steps=[('scaler', StandardScaler()), ('dim_reducer', PCA()), ('model', LogisticRegression())], memory=cachedir)", "_____no_output_____" ], [ "# Parameter grid for log reg\nlogreg_param_grid = [\n # l1 without PCA\n # unscaled and scaled * 9 regularization strengths = 18 models\n {'scaler': [None, StandardScaler()], 'dim_reducer': [None], 'model': [LogisticRegression(penalty='l1', n_jobs=-1)],\n 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]},\n \n # l1 unscaled with PCA\n # 6 PCAs * 9 regularization strengths = 54 models\n {'scaler': [None], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(50, 301, 50),\n 'model': [LogisticRegression(penalty='l1', n_jobs=-1)], 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]},\n \n # l1 scaled with PCA\n # 4 PCAs * 9 regularization strengths = 36 models\n {'scaler': [StandardScaler()], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(200, 351, 50),\n 'model': [LogisticRegression(penalty='l1', n_jobs=-1)], 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]},\n \n # l2 (default) without PCA\n # unscaled and scaled * 9 regularization strengths = 18 models\n {'scaler': [None, StandardScaler()], 'dim_reducer': [None], 'model': [LogisticRegression(solver='lbfgs', n_jobs=-1)],\n 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]},\n \n # l2 (default) 
unscaled with PCA\n # 6 PCAs * 9 regularization strengths = 54 models\n {'scaler': [None], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(50, 301, 50),\n 'model': [LogisticRegression(solver='lbfgs', n_jobs=-1)], 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]},\n \n # l2 (default) scaled with PCA\n # 4 PCAs * 9 regularization strengths = 36 models\n {'scaler': [StandardScaler()], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(200, 351, 50),\n 'model': [LogisticRegression(solver='lbfgs', n_jobs=-1)], 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]}\n]\n\n# Instantiate the log reg grid search\nlogreg_grid_search = GridSearchCV(estimator=my_pipeline, param_grid=logreg_param_grid, cv=5, n_jobs=-1, verbose=5)", "_____no_output_____" ], [ "# Fit the log reg grid search\nfitted_logreg_grid_ef = logreg_grid_search.fit(Xf_train, ef_train)", "Fitting 5 folds for each of 216 candidates, totalling 1080 fits\n" ], [ "# What was the best log reg?\nfitted_logreg_grid_ef.best_estimator_", "_____no_output_____" ], [ "print(f\"The best log reg's accuracy on the training set: {fitted_logreg_grid_ef.score(Xf_train, ef_train)*100}%\")\nprint(f\"The best log reg's accuracy on the test set: {fitted_logreg_grid_ef.score(Xf_test, ef_test)*100}%\")", "The best log reg's accuracy on the training set: 100.0%\nThe best log reg's accuracy on the test set: 71.29629629629629%\n" ], [ "# Parameter grid for SVM\nsvm_param_grid = [\n # unscaled and scaled * 9 regularization strengths = 18 models\n {'scaler': [None, StandardScaler()], 'dim_reducer': [None], 'model': [SVC()], 'model__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]},\n \n # unscaled\n # 6 PCAs * 9 regularization strengths = 54 models\n {'scaler': [None], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(50, 301, 50), 'model': [SVC()],\n 'model__C':[0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]},\n \n # scaled\n # 4 PCAs * 9 regularization strengths = 36 models\n {'scaler': [StandardScaler()], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(200, 351, 50), 'model': [SVC()],\n 'model__C':[0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]}\n]\n\n# Instantiate the SVM grid search\nsvm_grid_search = GridSearchCV(estimator=my_pipeline, param_grid=svm_param_grid, cv=5, n_jobs=-1, verbose=5)", "_____no_output_____" ], [ "# Fit the SVM grid search\nfitted_svm_grid_ef = svm_grid_search.fit(Xf_train, ef_train)", "Fitting 5 folds for each of 108 candidates, totalling 540 fits\n" ], [ "# What was the best SVM?\nfitted_svm_grid_ef.best_estimator_", "_____no_output_____" ], [ "print(f\"The best SVM's accuracy on the training set: {fitted_svm_grid_ef.score(Xf_train, ef_train)*100}%\")\nprint(f\"The best SVM's accuracy on the test set: {fitted_svm_grid_ef.score(Xf_test, ef_test)*100}%\")", "The best SVM's accuracy on the training set: 100.0%\nThe best SVM's accuracy on the test set: 70.83333333333334%\n" ], [ "# Parameter grid for random forest (scaling is unnecessary)\nrf_param_grid = [\n # 5 numbers of estimators * 5 max depths = 25 models\n {'scaler': [None], 'dim_reducer': [None], 'model': [RandomForestClassifier(n_jobs=-1)], 'model__n_estimators': np.arange(100, 501, 100),\n 'model__max_depth': np.arange(5, 26, 5)},\n \n # 6 PCAs * 5 numbers of estimators * 5 max depths = 150 models\n {'scaler': [None], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(50, 301, 50), 'model': [RandomForestClassifier(n_jobs=-1)],\n 'model__n_estimators': np.arange(100, 501, 
100), 'model__max_depth': np.arange(5, 26, 5)}\n]\n\n# Instantiate the rf grid search\nrf_grid_search = GridSearchCV(estimator=my_pipeline, param_grid=rf_param_grid, cv=5, n_jobs=-1, verbose=5)", "_____no_output_____" ], [ "# Fit the rf grid search\nfitted_rf_grid_ef = rf_grid_search.fit(Xf_train, ef_train)", "Fitting 5 folds for each of 175 candidates, totalling 875 fits\n" ], [ "# What was the best rf?\nfitted_rf_grid_ef.best_estimator_", "_____no_output_____" ], [ "print(f\"The best random forest's accuracy on the training set: {fitted_rf_grid_ef.score(Xf_train, ef_train)*100}%\")\nprint(f\"The best random forest's accuracy on the test set: {fitted_rf_grid_ef.score(Xf_test, ef_test)*100}%\")", "The best random forest's accuracy on the training set: 100.0%\nThe best random forest's accuracy on the test set: 61.57407407407407%\n" ], [ "# Parameter grid for KNN\nknn_param_grid = [\n # unscaled and scaled * 10 Ks = 20 models\n {'scaler': [None, StandardScaler()], 'dim_reducer': [None], 'model': [KNeighborsClassifier(n_jobs=-1)], 'model__n_neighbors': np.arange(3, 22, 2)},\n \n # unscaled\n # 6 PCAs * 10 Ks = 60 models\n {'scaler': [None], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(50, 301, 50), 'model': [KNeighborsClassifier(n_jobs=-1)],\n 'model__n_neighbors': np.arange(3, 22, 2)},\n \n # scaled\n # 4 PCAs * 10 Ks = 40 models\n {'scaler': [StandardScaler()], 'dim_reducer': [PCA()], 'dim_reducer__n_components': np.arange(200, 351, 50), 'model': [KNeighborsClassifier(n_jobs=-1)],\n 'model__n_neighbors': np.arange(3, 22, 2)}\n]\n\n# Instantiate the grid search\nknn_grid_search = GridSearchCV(estimator=my_pipeline, param_grid=knn_param_grid, cv=5, n_jobs=-1, verbose=5)", "_____no_output_____" ], [ "# Fit the KNN grid search\nfitted_knn_grid_ef = knn_grid_search.fit(Xf_train, ef_train)", "Fitting 5 folds for each of 120 candidates, totalling 600 fits\n" ], [ "# What was the best KNN model?\nfitted_knn_grid_ef.best_estimator_", "_____no_output_____" ], [ "print(f\"The best KNN model's accuracy on the training set: {fitted_knn_grid_ef.score(Xf_train, ef_train)*100}%\")\nprint(f\"The best KNN model's accuracy on the test set: {fitted_knn_grid_ef.score(Xf_test, ef_test)*100}%\")", "The best KNN model's accuracy on the training set: 59.32539682539682%\nThe best KNN model's accuracy on the test set: 55.55555555555556%\n" ] ], [ [ "### Conclusions for classifying emotions for females\n- Of the four classifier types I tried in my grid searches, logistic regression had the highest accuracy on the test set (71.29%), followed by SVM (70.83%), random forest (61.57%), and lastly, KNN (55.56%).\n- Except for the best KNN model, all the best models found in the grid searches had training accuracies of 100%, indicating that they overfit to the training set.\n - The best KNN model had a training accuracy of 59.33%, which was not much higher than its test accuracy of 55.56%. A much wider gap was found in the best KNN model for male emotions.\n- For the classifier types in which scaling the features matters (logistic regression, SVM, and KNN), the best logistic regression and SVM models made use of the standard scaler, while the best KNN model did not.\n- All the best-in-type models made use of principal components, except SVM.\n- Interestingly, the female emotion classifiers achieved higher accuracies than their male counterparts. 
It appears that for the RAVDESS dataset, the differences between female emotions are greater than the differences between male emotions.\n    - Based on this alone, I cannot extrapolate and conclude that women are socially more expressive than men are, although this is an interesting thought.", "_____no_output_____" ] ] ]
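(A hedged follow-up, not part of the original notebook: since every best-in-type model except KNN scored 100% on the training set, the cross-validated accuracy stored on each fitted grid search is a fairer summary than training accuracy. This sketch reuses the fitted grid objects and the female train/test split from the cells above.)

```python
# Minimal sketch: mean CV accuracy and train/test gap per best-in-type model.
for name, grid in [('log reg', fitted_logreg_grid_ef), ('SVM', fitted_svm_grid_ef),
                   ('random forest', fitted_rf_grid_ef), ('KNN', fitted_knn_grid_ef)]:
    gap = grid.score(Xf_train, ef_train) - grid.score(Xf_test, ef_test)
    print(f'{name}: mean CV accuracy {grid.best_score_*100:.2f}%, '
          f'train-test gap {gap*100:.2f} points')
```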
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
4a7a88fb3c7d5f921a3644673f2718f1ad64df0b
25,503
ipynb
Jupyter Notebook
Tutorial_packing_sequences.ipynb
lasofivec/dataflowr
68697f57968c4b8bc2669375f257a55057125f96
[ "Apache-2.0" ]
null
null
null
Tutorial_packing_sequences.ipynb
lasofivec/dataflowr
68697f57968c4b8bc2669375f257a55057125f96
[ "Apache-2.0" ]
null
null
null
Tutorial_packing_sequences.ipynb
lasofivec/dataflowr
68697f57968c4b8bc2669375f257a55057125f96
[ "Apache-2.0" ]
null
null
null
38.758359
240
0.433322
[ [ [ "# Minimal tutorial on packing and unpacking sequences in PyTorch, aka how to use `pack_padded_sequence` and `pad_packed_sequence`\n\nThis is a jupyter version of [@Tushar-N 's gist](https://gist.github.com/Tushar-N/dfca335e370a2bc3bc79876e6270099e) with comments from [@Harsh Trivedi repo](https://github.com/HarshTrivedi/packing-unpacking-pytorch-minimal-tutorial)\n", "_____no_output_____" ] ], [ [ "# from https://github.com/HarshTrivedi/packing-unpacking-pytorch-minimal-tutorial\nimport torch\nfrom torch import LongTensor\nfrom torch.nn import Embedding, LSTM\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n\n## We want to run LSTM on a batch of 3 character sequences ['long_str', 'tiny', 'medium']\n#\n# Step 1: Construct Vocabulary\n# Step 2: Load indexed data (list of instances, where each instance is list of character indices)\n# Step 3: Make Model\n# * Step 4: Pad instances with 0s till max length sequence\n# * Step 5: Sort instances by sequence length in descending order\n# * Step 6: Embed the instances\n# * Step 7: Call pack_padded_sequence with embeded instances and sequence lengths\n# * Step 8: Forward with LSTM\n# * Step 9: Call unpack_padded_sequences if required / or just pick last hidden vector\n# * Summary of Shape Transformations", "_____no_output_____" ], [ "# We want to run LSTM on a batch following 3 character sequences\nseqs = ['long_str', # len = 8\n 'tiny', # len = 4\n 'medium'] # len = 6", "_____no_output_____" ], [ "## Step 1: Construct Vocabulary ##\n##------------------------------##\n# make sure <pad> idx is 0\nvocab = ['<pad>'] + sorted(set([char for seq in seqs for char in seq]))\n# => ['<pad>', '_', 'd', 'e', 'g', 'i', 'l', 'm', 'n', 'o', 'r', 's', 't', 'u', 'y']", "_____no_output_____" ], [ "vocab", "_____no_output_____" ], [ "## Step 2: Load indexed data (list of instances, where each instance is list of character indices) ##\n##-------------------------------------------------------------------------------------------------##\nvectorized_seqs = [[vocab.index(tok) for tok in seq]for seq in seqs]\n# vectorized_seqs => [[6, 9, 8, 4, 1, 11, 12, 10],\n# [12, 5, 8, 14],\n# [7, 3, 2, 5, 13, 7]]", "_____no_output_____" ], [ "vectorized_seqs", "_____no_output_____" ], [ "## Step 3: Make Model ##\n##--------------------##\nembed = Embedding(len(vocab), 4) # embedding_dim = 4\nlstm = LSTM(input_size=4, hidden_size=5, batch_first=True) # input_dim = 4, hidden_dim = 5", "_____no_output_____" ], [ "## Step 4: Pad instances with 0s till max length sequence ##\n##--------------------------------------------------------##\n\n# get the length of each seq in your batch\nseq_lengths = LongTensor(list(map(len, vectorized_seqs)))\n# seq_lengths => [ 8, 4, 6]\n# batch_sum_seq_len: 8 + 4 + 6 = 18\n# max_seq_len: 8\n\nseq_tensor = Variable(torch.zeros((len(vectorized_seqs), seq_lengths.max()))).long()\n# seq_tensor => [[0 0 0 0 0 0 0 0]\n# [0 0 0 0 0 0 0 0]\n# [0 0 0 0 0 0 0 0]]\n\nfor idx, (seq, seqlen) in enumerate(zip(vectorized_seqs, seq_lengths)):\n seq_tensor[idx, :seqlen] = LongTensor(seq)\n# seq_tensor => [[ 6 9 8 4 1 11 12 10] # long_str\n# [12 5 8 14 0 0 0 0] # tiny\n# [ 7 3 2 5 13 7 0 0]] # medium\n# seq_tensor.shape : (batch_size X max_seq_len) = (3 X 8)", "_____no_output_____" ], [ "seq_lengths", "_____no_output_____" ], [ "seq_tensor", "_____no_output_____" ], [ "## Step 5: Sort instances by sequence length in descending order ##\n##---------------------------------------------------------------##\n\nseq_lengths, perm_idx = 
seq_lengths.sort(0, descending=True)\nseq_tensor = seq_tensor[perm_idx]\n# seq_tensor => [[ 6 9 8 4 1 11 12 10] # long_str\n# [ 7 3 2 5 13 7 0 0] # medium\n# [12 5 8 14 0 0 0 0]] # tiny\n# seq_tensor.shape : (batch_size X max_seq_len) = (3 X 8)", "_____no_output_____" ], [ "perm_idx", "_____no_output_____" ], [ "seq_tensor", "_____no_output_____" ], [ "## Step 6: Embed the instances ##\n##-----------------------------##\n\nembedded_seq_tensor = embed(seq_tensor)\n# embedded_seq_tensor =>\n# [[[-0.77578706 -1.8080667 -1.1168439 1.1059115 ] l\n# [-0.23622951 2.0361056 0.15435742 -0.04513785] o\n# [-0.6000342 1.1732816 0.19938554 -1.5976517 ] n\n# [ 0.40524676 0.98665565 -0.08621677 -1.1728264 ] g\n# [-1.6334635 -0.6100042 1.7509955 -1.931793 ] _\n# [-0.6470658 -0.6266589 -1.7463604 1.2675372 ] s\n# [ 0.64004815 0.45813003 0.3476034 -0.03451729] t\n# [-0.22739866 -0.45782727 -0.6643252 0.25129375]] r\n\n# [[ 0.16031227 -0.08209462 -0.16297023 0.48121014] m\n# [-0.7303265 -0.857339 0.58913064 -1.1068314 ] e\n# [ 0.48159844 -1.4886451 0.92639893 0.76906884] d\n# [ 0.27616557 -1.224429 -1.342848 -0.7495876 ] i\n# [ 0.01795524 -0.59048957 -0.53800726 -0.6611691 ] u\n# [ 0.16031227 -0.08209462 -0.16297023 0.48121014] m\n# [ 0.2691206 -0.43435425 0.87935454 -2.2269666 ] <pad>\n# [ 0.2691206 -0.43435425 0.87935454 -2.2269666 ]] <pad>\n\n# [[ 0.64004815 0.45813003 0.3476034 -0.03451729] t\n# [ 0.27616557 -1.224429 -1.342848 -0.7495876 ] i\n# [-0.6000342 1.1732816 0.19938554 -1.5976517 ] n\n# [-1.284392 0.68294704 1.4064184 -0.42879772] y\n# [ 0.2691206 -0.43435425 0.87935454 -2.2269666 ] <pad>\n# [ 0.2691206 -0.43435425 0.87935454 -2.2269666 ] <pad>\n# [ 0.2691206 -0.43435425 0.87935454 -2.2269666 ] <pad>\n# [ 0.2691206 -0.43435425 0.87935454 -2.2269666 ]]] <pad>\n# embedded_seq_tensor.shape : (batch_size X max_seq_len X embedding_dim) = (3 X 8 X 4)", "_____no_output_____" ], [ "embedded_seq_tensor", "_____no_output_____" ], [ "## Step 7: Call pack_padded_sequence with embeded instances and sequence lengths ##\n##-------------------------------------------------------------------------------##\n\npacked_input = pack_padded_sequence(embedded_seq_tensor, seq_lengths.cpu().numpy(), batch_first=True)\n# packed_input (PackedSequence is NamedTuple with 2 attributes: data and batch_sizes\n#\n# packed_input.data =>\n# [[-0.77578706 -1.8080667 -1.1168439 1.1059115 ] l\n# [ 0.01795524 -0.59048957 -0.53800726 -0.6611691 ] m\n# [-0.6470658 -0.6266589 -1.7463604 1.2675372 ] t\n# [ 0.16031227 -0.08209462 -0.16297023 0.48121014] o\n# [ 0.40524676 0.98665565 -0.08621677 -1.1728264 ] e\n# [-1.284392 0.68294704 1.4064184 -0.42879772] i\n# [ 0.64004815 0.45813003 0.3476034 -0.03451729] n\n# [ 0.27616557 -1.224429 -1.342848 -0.7495876 ] d\n# [ 0.64004815 0.45813003 0.3476034 -0.03451729] n\n# [-0.23622951 2.0361056 0.15435742 -0.04513785] g\n# [ 0.16031227 -0.08209462 -0.16297023 0.48121014] i\n# [-0.22739866 -0.45782727 -0.6643252 0.25129375]] y\n# [-0.7303265 -0.857339 0.58913064 -1.1068314 ] _\n# [-1.6334635 -0.6100042 1.7509955 -1.931793 ] u\n# [ 0.27616557 -1.224429 -1.342848 -0.7495876 ] s\n# [-0.6000342 1.1732816 0.19938554 -1.5976517 ] m\n# [-0.6000342 1.1732816 0.19938554 -1.5976517 ] t\n# [ 0.48159844 -1.4886451 0.92639893 0.76906884] r\n# packed_input.data.shape : (batch_sum_seq_len X embedding_dim) = (18 X 4)\n#\n# packed_input.batch_sizes => [ 3, 3, 3, 3, 2, 2, 1, 1]\n# visualization :\n# l o n g _ s t r #(long_str)\n# m e d i u m #(medium)\n# t i n y #(tiny)\n# 3 3 3 3 2 2 1 1 (sum = 18 
[batch_sum_seq_len])", "_____no_output_____" ], [ "packed_input.data.shape", "_____no_output_____" ], [ "## Step 8: Forward with LSTM ##\n##---------------------------##\n\npacked_output, (ht, ct) = lstm(packed_input)\n# packed_output (PackedSequence is NamedTuple with 2 attributes: data and batch_sizes\n#\n# packed_output.data :\n# [[-0.00947162 0.07743231 0.20343193 0.29611713 0.07992904] l\n# [ 0.08596145 0.09205993 0.20892891 0.21788561 0.00624391] o\n# [ 0.16861682 0.07807446 0.18812777 -0.01148055 -0.01091915] n\n# [ 0.20994528 0.17932937 0.17748171 0.05025435 0.15717036] g\n# [ 0.01364102 0.11060348 0.14704391 0.24145307 0.12879576] _\n# [ 0.02610307 0.00965587 0.31438383 0.246354 0.08276576] s\n# [ 0.09527554 0.14521319 0.1923058 -0.05925677 0.18633027] t\n# [ 0.09872741 0.13324396 0.19446367 0.4307988 -0.05149471] r\n# [ 0.03895474 0.08449443 0.18839942 0.02205326 0.23149511] m\n# [ 0.14620507 0.07822411 0.2849248 -0.22616537 0.15480657] e\n# [ 0.00884941 0.05762182 0.30557525 0.373712 0.08834908] d\n# [ 0.12460691 0.21189159 0.04823487 0.06384943 0.28563985] i\n# [ 0.01368293 0.15872964 0.03759198 -0.13403234 0.23890573] u\n# [ 0.00377969 0.05943518 0.2961751 0.35107893 0.15148178] m\n# [ 0.00737647 0.17101538 0.28344846 0.18878219 0.20339936] t\n# [ 0.0864429 0.11173367 0.3158251 0.37537992 0.11876849] i\n# [ 0.17885767 0.12713005 0.28287745 0.05562563 0.10871304] n\n# [ 0.09486895 0.12772645 0.34048414 0.25930756 0.12044918]] y\n# packed_output.data.shape : (batch_sum_seq_len X hidden_dim) = (18 X 5)\n\n# packed_output.batch_sizes => [ 3, 3, 3, 3, 2, 2, 1, 1] (same as packed_input.batch_sizes)\n# visualization :\n# l o n g _ s t r #(long_str)\n# m e d i u m #(medium)\n# t i n y #(tiny)\n# 3 3 3 3 2 2 1 1 (sum = 18 [batch_sum_seq_len])", "_____no_output_____" ], [ "packed_output.data.shape", "_____no_output_____" ], [ "ht", "_____no_output_____" ], [ "ct", "_____no_output_____" ], [ "## Step 9: Call unpack_padded_sequences if required / or just pick last hidden vector ##\n##------------------------------------------------------------------------------------##\n\n# unpack your output if required\noutput, input_sizes = pad_packed_sequence(packed_output, batch_first=True)\n# output:\n# output =>\n# [[[-0.00947162 0.07743231 0.20343193 0.29611713 0.07992904] l\n# [ 0.20994528 0.17932937 0.17748171 0.05025435 0.15717036] o\n# [ 0.09527554 0.14521319 0.1923058 -0.05925677 0.18633027] n\n# [ 0.14620507 0.07822411 0.2849248 -0.22616537 0.15480657] g\n# [ 0.01368293 0.15872964 0.03759198 -0.13403234 0.23890573] _\n# [ 0.00737647 0.17101538 0.28344846 0.18878219 0.20339936] s\n# [ 0.17885767 0.12713005 0.28287745 0.05562563 0.10871304] t\n# [ 0.09486895 0.12772645 0.34048414 0.25930756 0.12044918]] r\n\n# [[ 0.08596145 0.09205993 0.20892891 0.21788561 0.00624391] m\n# [ 0.01364102 0.11060348 0.14704391 0.24145307 0.12879576] e\n# [ 0.09872741 0.13324396 0.19446367 0.4307988 -0.05149471] d\n# [ 0.00884941 0.05762182 0.30557525 0.373712 0.08834908] i\n# [ 0.00377969 0.05943518 0.2961751 0.35107893 0.15148178] u\n# [ 0.0864429 0.11173367 0.3158251 0.37537992 0.11876849] m\n# [ 0. 0. 0. 0. 0. ] <pad>\n# [ 0. 0. 0. 0. 0. ]] <pad>\n\n# [[ 0.16861682 0.07807446 0.18812777 -0.01148055 -0.01091915] t\n# [ 0.02610307 0.00965587 0.31438383 0.246354 0.08276576] i\n# [ 0.03895474 0.08449443 0.18839942 0.02205326 0.23149511] n\n# [ 0.12460691 0.21189159 0.04823487 0.06384943 0.28563985] y\n# [ 0. 0. 0. 0. 0. ] <pad>\n# [ 0. 0. 0. 0. 0. ] <pad>\n# [ 0. 0. 0. 0. 0. ] <pad>\n# [ 0. 0. 0. 0. 0. 
]]] <pad>\n# output.shape : ( batch_size X max_seq_len X hidden_dim) = (3 X 8 X 5)", "_____no_output_____" ], [ "output", "_____no_output_____" ], [ "# Or if you just want the final hidden state?\nprint(ht[-1])\n\n## Summary of Shape Transformations ##\n##----------------------------------##\n\n# (batch_size X max_seq_len X embedding_dim) --> Sort by seqlen ---> (batch_size X max_seq_len X embedding_dim)\n# (batch_size X max_seq_len X embedding_dim) ---> Pack ---> (batch_sum_seq_len X embedding_dim)\n# (batch_sum_seq_len X embedding_dim) ---> LSTM ---> (batch_sum_seq_len X hidden_dim)\n# (batch_sum_seq_len X hidden_dim) ---> UnPack ---> (batch_size X max_seq_len X hidden_dim)", "tensor([[ 0.1419, 0.0881, -0.0310, -0.0113, 0.0953],\n [ 0.1500, -0.0534, -0.1781, -0.3375, 0.0809],\n [ 0.2166, 0.0640, -0.0107, -0.1845, 0.2264]],\n grad_fn=<SelectBackward>)\n" ] ] ]
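(Two hedged notes on the tutorial above, not from the original gist. First, Step 4 calls `Variable(...)` even though `Variable` is never imported, and it has been a deprecated no-op since PyTorch 0.4, so the padded tensor can be built directly. Second, on PyTorch 1.1+ `pack_padded_sequence` accepts `enforce_sorted=False`, which makes the manual descending sort of Step 5 optional.)

```python
# Step 4 without Variable (no extra import needed):
seq_tensor = torch.zeros((len(vectorized_seqs), seq_lengths.max()), dtype=torch.long)
for idx, (seq, seqlen) in enumerate(zip(vectorized_seqs, seq_lengths)):
    seq_tensor[idx, :seqlen] = LongTensor(seq)

# Steps 5-7 in one call on PyTorch >= 1.1: packing handles the sorting itself.
packed_input = pack_padded_sequence(embed(seq_tensor), seq_lengths,
                                    batch_first=True, enforce_sorted=False)
```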
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a7aa83bd51aa9bd9be73eb35d7b810d6761f2c5
11,391
ipynb
Jupyter Notebook
FaceEmotionRecognition/predict-emotion-using-webcam.ipynb
manavkapadnis/ML-Reserve
01aaa252bce26c943bc689bc649cb05a115c0b09
[ "MIT" ]
12
2021-09-11T09:44:23.000Z
2022-03-12T09:16:53.000Z
FaceEmotionRecognition/predict-emotion-using-webcam.ipynb
HemanthSai7/ML-Reserve
26b583f58135fcabb65e815c85760c5eea70f2a0
[ "MIT" ]
54
2021-09-11T09:48:07.000Z
2022-01-31T05:38:12.000Z
FaceEmotionRecognition/predict-emotion-using-webcam.ipynb
HemanthSai7/ML-Reserve
26b583f58135fcabb65e815c85760c5eea70f2a0
[ "MIT" ]
39
2021-09-11T09:44:26.000Z
2022-03-12T09:16:55.000Z
46.304878
2,223
0.58476
[ [ [ "! pip install opencv-python", "Collecting opencv-python\n Downloading https://files.pythonhosted.org/packages/70/a8/e52a82936be6d5696fb06c78450707c26dc13df91bb6bf49583bb9abbaa0/opencv_python-4.5.1.48-cp37-cp37m-win_amd64.whl (34.9MB)\nRequirement already satisfied: numpy>=1.14.5 in d:\\anaconda\\installed_files\\lib\\site-packages (from opencv-python) (1.16.5)\nInstalling collected packages: opencv-python\nSuccessfully installed opencv-python-4.5.1.48\n" ], [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\n#tensorflow packages\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.preprocessing import image", "_____no_output_____" ], [ "# Face Emotion Recognition\n#Here i am using my trained model, that is trained and saved as a h5 file\nfaceDetection_model = 'D:\\pavi\\DeepLearningProjects\\Face_Emosion_Recognition\\pretrained_model\\Face_Detection_TrainedModel\\haarcascade_frontalface_default.xml'", "_____no_output_____" ], [ "Emotion_Detction_model = 'D:\\pavi\\DeepLearningProjects\\Face_Emosion_Recognition\\pretrained_model\\Face_Emotion_model\\FER_vggnet.h5'\nvggnet = load_model(Emotion_Detction_model)\nvggnet.summary()", "Model: \"sequential_1\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_6 (Conv2D) (None, 48, 48, 64) 1664 \n_________________________________________________________________\nconv2d_7 (Conv2D) (None, 48, 48, 64) 102464 \n_________________________________________________________________\nbatch_normalization_3 (Batch (None, 48, 48, 64) 256 \n_________________________________________________________________\nmax_pooling2d_3 (MaxPooling2 (None, 24, 24, 64) 0 \n_________________________________________________________________\nconv2d_8 (Conv2D) (None, 24, 24, 128) 73856 \n_________________________________________________________________\nconv2d_9 (Conv2D) (None, 24, 24, 128) 147584 \n_________________________________________________________________\nbatch_normalization_4 (Batch (None, 24, 24, 128) 512 \n_________________________________________________________________\nmax_pooling2d_4 (MaxPooling2 (None, 12, 12, 128) 0 \n_________________________________________________________________\nconv2d_10 (Conv2D) (None, 12, 12, 256) 295168 \n_________________________________________________________________\nconv2d_11 (Conv2D) (None, 12, 12, 256) 590080 \n_________________________________________________________________\nbatch_normalization_5 (Batch (None, 12, 12, 256) 1024 \n_________________________________________________________________\nmax_pooling2d_5 (MaxPooling2 (None, 6, 6, 256) 0 \n_________________________________________________________________\nflatten_1 (Flatten) (None, 9216) 0 \n_________________________________________________________________\ndense_3 (Dense) (None, 1024) 9438208 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 1024) 0 \n_________________________________________________________________\ndense_4 (Dense) (None, 1024) 1049600 \n_________________________________________________________________\ndropout_3 (Dropout) (None, 1024) 0 \n_________________________________________________________________\ndense_5 (Dense) (None, 7) 7175 \n=================================================================\nTotal params: 11,707,591\nTrainable params: 11,706,695\nNon-trainable params: 
896\n_________________________________________________________________\n" ], [ "#defining the emotion classes for classification\nclasses = np.array((\"Angry\", \"Disgust\", \"Fear\", \"Happy\", \"Sad\", \"Surprise\", \"Neutral\"))\n\n#video capturing and classifing\n\nfaceCascade = cv2.CascadeClassifier(faceDetection_model)\nvideo_capture = cv2.VideoCapture(0)\n\nwhile True:\n ret,frame = video_capture.read()\n \n cv2.imshow('Original Video' , frame)\n \n gray = cv2.cvtColor(frame , cv2.COLOR_BGR2GRAY)\n \n face = faceCascade.detectMultiScale(gray ,scaleFactor=1.1 , minNeighbors=5,)\n \n #draw rectangle around the face and cut the face only\n for (x,y,w,h) in face:\n \n cv2.rectangle( frame , (x,y) , (x+w , y+h) , (0,255,255) , 2)\n face_img = gray[ y:(y+h) , x:(x+w)]\n x = cv2.resize(face_img, (48,48) , interpolation = cv2.INTER_AREA)\n \n if np.sum([x])!=0:\n #preprocessing\n x = x.astype('float')/255.0 \n x = image.img_to_array(x)\n x = np.expand_dims(x , axis = 0)\n \n \n #face_img = face_img.reshape(48,48)\n \n # prediction\n p = vggnet.predict(x)\n a = np.argmax(p,axis=1)\n print('prediction',classes[a])\n label = str(classes[a][0])\n print(label)\n label_position = (x-10,y-10)\n \n fontScale = 0.6\n thickness = 3\n cv2.putText(frame , label , label_position , cv2.FONT_HERSHEY_SIMPLEX , fontScale , (0,255,0) , thickness , cv2.LINE_AA)\n else:\n cv2.putText(frame , 'No Face Detection' , label_position , cv2.FONT_HERSHEY_SIMPLEX , 0.6 , (0,255,0) , 3 ,cv2.LINE_AA)\n \n #cv2.imshow('croped image' , face_img)\n #display the resulting frame \n \n cv2.imshow('Face Detected Video' , frame)\n \n #break the capturing\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n \nvideo_capture.release()\ncv2.destroyAllWindows()\n", "prediction ['Sad']\nSad\n" ] ] ]
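(A hedged note on the capture loop above, not part of the original notebook: inside `for (x, y, w, h) in face:` the loop variable `x` is reassigned to the resized face image, so `label_position = (x-10, y-10)` ends up subtracting from an array instead of the face's left coordinate, and the `else` branch can reference `label_position` before it is ever set. A sketch of the per-face block with the collision removed, keeping the image under its own name `roi`:)

```python
for (x, y, w, h) in face:
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
    roi = cv2.resize(gray[y:y + h, x:x + w], (48, 48), interpolation=cv2.INTER_AREA)
    if np.sum([roi]) != 0:
        roi = image.img_to_array(roi.astype('float') / 255.0)  # shape (48, 48, 1)
        roi = np.expand_dims(roi, axis=0)                       # shape (1, 48, 48, 1)
        label = str(classes[np.argmax(vggnet.predict(roi), axis=1)][0])
        cv2.putText(frame, label, (x - 10, y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 3, cv2.LINE_AA)
```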
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
4a7abc7b9038b32c2b6ea4b0aef42a58c1b90b24
157,117
ipynb
Jupyter Notebook
1. Bi-LSTM/ThreeX_1D_combine_3X.ipynb
nikhil-mathews/MastersPr_Predicting-Human-Pathogen-PPIs-using-Natural-Language-Processing-methods
78bbaaf5e4e52939a522fe14aedbf5acfd29e10c
[ "MIT" ]
null
null
null
1. Bi-LSTM/ThreeX_1D_combine_3X.ipynb
nikhil-mathews/MastersPr_Predicting-Human-Pathogen-PPIs-using-Natural-Language-Processing-methods
78bbaaf5e4e52939a522fe14aedbf5acfd29e10c
[ "MIT" ]
null
null
null
1. Bi-LSTM/ThreeX_1D_combine_3X.ipynb
nikhil-mathews/MastersPr_Predicting-Human-Pathogen-PPIs-using-Natural-Language-Processing-methods
78bbaaf5e4e52939a522fe14aedbf5acfd29e10c
[ "MIT" ]
null
null
null
157,117
157,117
0.8994
[ [ [ "import pandas as pd\ntry:\n import pickle5 as pickle\nexcept:\n !pip install pickle5\n import pickle5 as pickle\nimport os\nimport sys\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.layers import Dense, Input, GlobalMaxPooling1D, Flatten\nfrom keras.layers import Conv1D, MaxPooling1D, Embedding, Concatenate, Lambda\nfrom keras.models import Model\nfrom sklearn.metrics import roc_auc_score,roc_curve, auc\nfrom numpy import random\nfrom keras.layers import LSTM, Bidirectional, GlobalMaxPool1D, Dropout\nfrom keras.optimizers import Adam\nfrom keras.utils.vis_utils import plot_model\nimport seaborn as sns\n\nimport sys\nsys.path.insert(0,'/content/drive/MyDrive/ML_Data/')\nimport functions as f", "Collecting pickle5\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/f7/4c/5c4dd0462c8d3a6bc4af500a6af240763c2ebd1efdc736fc2c946d44b70a/pickle5-0.0.11.tar.gz (132kB)\n\r\u001b[K |██▌ | 10kB 16.5MB/s eta 0:00:01\r\u001b[K |█████ | 20kB 16.7MB/s eta 0:00:01\r\u001b[K |███████▍ | 30kB 9.6MB/s eta 0:00:01\r\u001b[K |██████████ | 40kB 7.8MB/s eta 0:00:01\r\u001b[K |████████████▍ | 51kB 8.5MB/s eta 0:00:01\r\u001b[K |██████████████▉ | 61kB 8.4MB/s eta 0:00:01\r\u001b[K |█████████████████▍ | 71kB 8.7MB/s eta 0:00:01\r\u001b[K |███████████████████▉ | 81kB 8.2MB/s eta 0:00:01\r\u001b[K |██████████████████████▎ | 92kB 8.1MB/s eta 0:00:01\r\u001b[K |████████████████████████▉ | 102kB 8.1MB/s eta 0:00:01\r\u001b[K |███████████████████████████▎ | 112kB 8.1MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▊ | 122kB 8.1MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 133kB 8.1MB/s \n\u001b[?25hBuilding wheels for collected packages: pickle5\n Building wheel for pickle5 (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n Created wheel for pickle5: filename=pickle5-0.0.11-cp37-cp37m-linux_x86_64.whl size=219247 sha256=0657aecf24e6ee5e4a88770bc5263adc04164cc043da691dad422289c67c7a13\n Stored in directory: /root/.cache/pip/wheels/a6/90/95/f889ca4aa8b0e0c7f21c8470b6f5d6032f0390a3a141a9a3bd\nSuccessfully built pickle5\nInstalling collected packages: pickle5\nSuccessfully installed pickle5-0.0.11\n" ], [ "\ndef load_data(randomize=False):\n try:\n with open(\"/content/drive/MyDrive/ML_Data/hyppi-train.pkl\", \"rb\") as fh:\n df_train = pickle.load(fh)\n except:\n df_train = pd.read_pickle(\"C:/Users/nik00/py/proj/hyppi-train.pkl\")\n try:\n with open(\"/content/drive/MyDrive/ML_Data/hyppi-independent.pkl\", \"rb\") as fh:\n df_test = pickle.load(fh)\n except:\n df_test = pd.read_pickle(\"C:/Users/nik00/py/proj/hyppi-independent.pkl\")\n if randomize:\n return shuff_together(df_train,df_test)\n else:\n return df_train,df_test\n\ndf_train,df_test = load_data()\nprint('The data used will be:')\ndf_train[['Human','Yersinia']]", "The data used will be:\n" ], [ "lengths = sorted(len(s) for s in df_train['Human'])\nprint(\"Median length of Human sequence is\",lengths[len(lengths)//2])\n_ = sns.displot(lengths)\n_=plt.title(\"Most Human sequences seem to be less than 2000 in length\")", "Median length of Human sequence is 479\n" ], [ "lengths = sorted(len(s) for s in df_train['Yersinia'])\nprint(\"Median length of Yersinia sequence is\",lengths[len(lengths)//2])\n_ = sns.displot(lengths)\n_=plt.title(\"Most Yersinia sequences seem to be less than 1000 in length\")", "Median length of Yersinia sequence is 336\n" ], [ "data_1D_join_pre,data_test_1D_join_pre,num_words_1D_join,MAX_SEQUENCE_LENGTH_1D_J,MAX_VOCAB_SIZE_1D = f.get_seq_data_join(1000,1000,df_train,df_test, pad='pre', show=True)", "MAX_VOCAB_SIZE is 1000\nMAX_SEQUENCE_LENGTH is 1000\nmax sequence_data length: 8966\nmin sequence_data length: 107\nmedian sequence_data length: 881\n" ], [ "data_1D_join_center,data_test_1D_join_center,num_words_1D_join,MAX_SEQUENCE_LENGTH_1D_J,MAX_VOCAB_SIZE_1D = f.get_seq_data_join(1000,1000,df_train,df_test, pad='center')", "MAX_VOCAB_SIZE is 1000\nMAX_SEQUENCE_LENGTH is 1000\nmax sequence_data length: 8966\nmin sequence_data length: 107\nmedian sequence_data length: 881\nmax word index: 20\nFound 20 unique tokens.\nCenter padding.\nShape of data tensor: (6270, 1000)\nmax sequences_test length: 6126\nmin sequences_test length: 134\nmedian sequences_test length: 901\nCenter padding for test seq.\nShape of data_test tensor: (1514, 1000)\nnum_words is 21\n" ], [ "data_1D_join_post,data_test_1D_join_post,num_words_1D_join,MAX_SEQUENCE_LENGTH_1D_J,MAX_VOCAB_SIZE_1D = f.get_seq_data_join(1000,1000,df_train,df_test, pad='post')", "MAX_VOCAB_SIZE is 1000\nMAX_SEQUENCE_LENGTH is 1000\nmax sequence_data length: 8966\nmin sequence_data length: 107\nmedian sequence_data length: 881\nmax word index: 20\nFound 20 unique tokens.\npost padding.\nShape of data tensor: (6270, 1000)\nmax sequences_test length: 6126\nmin sequences_test length: 134\nmedian sequences_test length: 901\npost padding for test seq.\nShape of data_test tensor: (1514, 1000)\nnum_words is 21\n" ], [ "data1_1D_doubleip_pre,data2_1D_doubleip_pre,data1_test_1D_doubleip_pre,data2_test_1D_doubleip_pre,num_words_1D,MAX_SEQUENCE_LENGTH_1D_dIP,MAX_VOCAB_SIZE_1D = f.get_seq_data_doubleip(100,1000,df_train,df_test,pad = 'pre', show=True)", "MAX_VOCAB_SIZE is 100\nMAX_SEQUENCE_LENGTH is 1000\nmax sequences1_train length: 8797\nmin sequences1_train length: 
41\nmedian sequences1_train length: 479\n" ], [ "data1_1D_doubleip_center,data2_1D_doubleip_center,data1_test_1D_doubleip_center,data2_test_1D_doubleip_center,num_words_1D,MAX_SEQUENCE_LENGTH_1D_dIP,MAX_VOCAB_SIZE_1D = f.get_seq_data_doubleip(100,1000,df_train,df_test)", "MAX_VOCAB_SIZE is 100\nMAX_SEQUENCE_LENGTH is 1000\nmax sequences1_train length: 8797\nmin sequences1_train length: 41\nmedian sequences1_train length: 479\nmax word index sequences1_train: 20\nmax sequences2_train length: 3710\nmin sequences2_train length: 35\nmedian sequences2_train length: 336\nmax word index sequences2_train: 20\nFound 20 unique tokens in tokenizer1.\nFound 20 unique tokens in tokenizer2.\nCenter padding\nShape of data1 tensor: (6270, 1000)\nShape of data2 tensor: (6270, 1000)\nmax test_sequences1 length: 5635\nmin test_sequences1 length: 39\nmedian test_sequences1 length: 496\nmax test_sequences2 length: 3710\nmin test_sequences2 length: 35\nmedian test_sequences2 length: 331\nCenter padding for test seq.\nShape of test_data1 tensor: (1514, 1000)\nShape of test_data2 tensor: (1514, 1000)\nnum_words is 21\n" ], [ "data1_1D_doubleip_post,data2_1D_doubleip_post,data1_test_1D_doubleip_post,data2_test_1D_doubleip_post,num_words_1D,MAX_SEQUENCE_LENGTH_1D_dIP,MAX_VOCAB_SIZE_1D = f.get_seq_data_doubleip(100,1000,df_train,df_test,pad = 'post')", "MAX_VOCAB_SIZE is 100\nMAX_SEQUENCE_LENGTH is 1000\nmax sequences1_train length: 8797\nmin sequences1_train length: 41\nmedian sequences1_train length: 479\nmax word index sequences1_train: 20\nmax sequences2_train length: 3710\nmin sequences2_train length: 35\nmedian sequences2_train length: 336\nmax word index sequences2_train: 20\nFound 20 unique tokens in tokenizer1.\nFound 20 unique tokens in tokenizer2.\npost padding\nShape of data1 tensor: (6270, 1000)\nShape of data2 tensor: (6270, 1000)\nmax test_sequences1 length: 5635\nmin test_sequences1 length: 39\nmedian test_sequences1 length: 496\nmax test_sequences2 length: 3710\nmin test_sequences2 length: 35\nmedian test_sequences2 length: 331\npost padding for test seq.\nShape of test_data1 tensor: (1514, 1000)\nShape of test_data2 tensor: (1514, 1000)\nnum_words is 21\n" ], [ "EMBEDDING_DIM_1D = 5\nDROP = 0.2\nBATCH_SIZE = 128\nEPOCHS = 50\nM_1D=10\n\n\n\nx1_join = f.BiLSTM_model(MAX_SEQUENCE_LENGTH_1D_J,EMBEDDING_DIM_1D,num_words_1D,M_1D,DROP)\nx2_join = f.BiLSTM_model(MAX_SEQUENCE_LENGTH_1D_J,EMBEDDING_DIM_1D,num_words_1D,M_1D,DROP)\nx3_join = f.BiLSTM_model(MAX_SEQUENCE_LENGTH_1D_J,EMBEDDING_DIM_1D,num_words_1D,M_1D,DROP)\n\nx1_doubleip = f.BiLSTM_model(MAX_SEQUENCE_LENGTH_1D_dIP,EMBEDDING_DIM_1D,num_words_1D,M_1D,DROP)\nx2_doubleip = f.BiLSTM_model(MAX_SEQUENCE_LENGTH_1D_dIP,EMBEDDING_DIM_1D,num_words_1D,M_1D,DROP)\nx3_doubleip = f.BiLSTM_model(MAX_SEQUENCE_LENGTH_1D_dIP,EMBEDDING_DIM_1D,num_words_1D,M_1D,DROP)\nx4_doubleip = f.BiLSTM_model(MAX_SEQUENCE_LENGTH_1D_dIP,EMBEDDING_DIM_1D,num_words_1D,M_1D,DROP)\nx5_doubleip = f.BiLSTM_model(MAX_SEQUENCE_LENGTH_1D_dIP,EMBEDDING_DIM_1D,num_words_1D,M_1D,DROP)\nx6_doubleip = f.BiLSTM_model(MAX_SEQUENCE_LENGTH_1D_dIP,EMBEDDING_DIM_1D,num_words_1D,M_1D,DROP)\n\nconcatenator = Concatenate(axis=1)\nx = concatenator([x1_join.output, x2_join.output, x3_join.output, x1_doubleip.output, x2_doubleip.output, x3_doubleip.output, x4_doubleip.output, x5_doubleip.output, x6_doubleip.output])\nx = Dense(128)(x)\nx = Dropout(0.2)(x)\noutput = Dense(1, activation=\"sigmoid\",name=\"Final\")(x)\nmodel1D_combine = Model(inputs=[x1_join.input, x2_join.input, x3_join.input, 
x1_doubleip.input, x2_doubleip.input, x3_doubleip.input, x4_doubleip.input, x5_doubleip.input, x6_doubleip.input], outputs=output)\n\nmodel1D_combine.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n\ntrains = [data_1D_join_pre,data_1D_join_center,data_1D_join_post, data1_1D_doubleip_pre,data1_1D_doubleip_center,data1_1D_doubleip_post, data2_1D_doubleip_pre,data2_1D_doubleip_center,data2_1D_doubleip_post]\ntests = [data_test_1D_join_pre,data_test_1D_join_center,data_test_1D_join_post, data1_test_1D_doubleip_pre,data1_test_1D_doubleip_center,data1_test_1D_doubleip_post, data2_test_1D_doubleip_pre,data2_test_1D_doubleip_center,data2_test_1D_doubleip_post]\n\nmodel1D_combine.fit(trains, df_train['label'].values, epochs=EPOCHS, validation_data=(tests,df_test['label'].values),batch_size=BATCH_SIZE)\nprint(roc_auc_score(df_test['label'].values, model1D_combine.predict(tests)))\n", "Epoch 1/50\n49/49 [==============================] - 88s 1s/step - loss: 0.6882 - accuracy: 0.5579 - val_loss: 0.6858 - val_accuracy: 0.5132\nEpoch 2/50\n49/49 [==============================] - 50s 1s/step - loss: 0.6262 - accuracy: 0.6523 - val_loss: 0.7682 - val_accuracy: 0.5073\nEpoch 3/50\n49/49 [==============================] - 50s 1s/step - loss: 0.6235 - accuracy: 0.6504 - val_loss: 0.7002 - val_accuracy: 0.5304\nEpoch 4/50\n49/49 [==============================] - 51s 1s/step - loss: 0.6085 - accuracy: 0.6575 - val_loss: 0.7299 - val_accuracy: 0.5291\nEpoch 5/50\n49/49 [==============================] - 52s 1s/step - loss: 0.6004 - accuracy: 0.6773 - val_loss: 0.7319 - val_accuracy: 0.5297\nEpoch 6/50\n49/49 [==============================] - 52s 1s/step - loss: 0.5858 - accuracy: 0.6933 - val_loss: 0.6596 - val_accuracy: 0.5845\nEpoch 7/50\n49/49 [==============================] - 52s 1s/step - loss: 0.5876 - accuracy: 0.6779 - val_loss: 0.6844 - val_accuracy: 0.5621\nEpoch 8/50\n49/49 [==============================] - 52s 1s/step - loss: 0.5815 - accuracy: 0.6951 - val_loss: 0.7046 - val_accuracy: 0.5436\nEpoch 9/50\n49/49 [==============================] - 52s 1s/step - loss: 0.5781 - accuracy: 0.6959 - val_loss: 0.6315 - val_accuracy: 0.6308\nEpoch 10/50\n49/49 [==============================] - 52s 1s/step - loss: 0.5597 - accuracy: 0.7127 - val_loss: 0.6187 - val_accuracy: 0.6420\nEpoch 11/50\n49/49 [==============================] - 52s 1s/step - loss: 0.5642 - accuracy: 0.7077 - val_loss: 0.5914 - val_accuracy: 0.6783\nEpoch 12/50\n49/49 [==============================] - 52s 1s/step - loss: 0.5654 - accuracy: 0.7128 - val_loss: 0.6280 - val_accuracy: 0.6341\nEpoch 13/50\n49/49 [==============================] - 52s 1s/step - loss: 0.5495 - accuracy: 0.7141 - val_loss: 0.6292 - val_accuracy: 0.6281\nEpoch 14/50\n49/49 [==============================] - 52s 1s/step - loss: 0.5498 - accuracy: 0.7192 - val_loss: 0.5853 - val_accuracy: 0.6849\nEpoch 15/50\n49/49 [==============================] - 52s 1s/step - loss: 0.5350 - accuracy: 0.7307 - val_loss: 0.5657 - val_accuracy: 0.7266\nEpoch 16/50\n49/49 [==============================] - 52s 1s/step - loss: 0.5519 - accuracy: 0.7182 - val_loss: 0.5596 - val_accuracy: 0.7199\nEpoch 17/50\n49/49 [==============================] - 52s 1s/step - loss: 0.5458 - accuracy: 0.7315 - val_loss: 0.5515 - val_accuracy: 0.7325\nEpoch 18/50\n49/49 [==============================] - 52s 1s/step - loss: 0.5430 - accuracy: 0.7131 - val_loss: 0.5641 - val_accuracy: 0.7028\nEpoch 19/50\n49/49 [==============================] - 52s 1s/step - 
loss: 0.5357 - accuracy: 0.7222 - val_loss: 0.5625 - val_accuracy: 0.7048\nEpoch 20/50\n49/49 [==============================] - 52s 1s/step - loss: 0.5363 - accuracy: 0.7268 - val_loss: 0.5442 - val_accuracy: 0.7365\nEpoch 21/50\n49/49 [==============================] - 52s 1s/step - loss: 0.5233 - accuracy: 0.7420 - val_loss: 0.5411 - val_accuracy: 0.7404\nEpoch 22/50\n49/49 [==============================] - 52s 1s/step - loss: 0.5180 - accuracy: 0.7399 - val_loss: 0.5408 - val_accuracy: 0.7285\nEpoch 23/50\n49/49 [==============================] - 52s 1s/step - loss: 0.5297 - accuracy: 0.7379 - val_loss: 0.5569 - val_accuracy: 0.7153\nEpoch 24/50\n49/49 [==============================] - 52s 1s/step - loss: 0.5166 - accuracy: 0.7416 - val_loss: 0.6112 - val_accuracy: 0.6546\nEpoch 25/50\n49/49 [==============================] - 52s 1s/step - loss: 0.5241 - accuracy: 0.7424 - val_loss: 0.5391 - val_accuracy: 0.7272\nEpoch 26/50\n49/49 [==============================] - 52s 1s/step - loss: 0.5114 - accuracy: 0.7385 - val_loss: 0.5290 - val_accuracy: 0.7312\nEpoch 27/50\n49/49 [==============================] - 51s 1s/step - loss: 0.4963 - accuracy: 0.7499 - val_loss: 0.5370 - val_accuracy: 0.7338\nEpoch 28/50\n49/49 [==============================] - 52s 1s/step - loss: 0.4997 - accuracy: 0.7536 - val_loss: 0.5303 - val_accuracy: 0.7431\nEpoch 29/50\n49/49 [==============================] - 52s 1s/step - loss: 0.4809 - accuracy: 0.7699 - val_loss: 0.5204 - val_accuracy: 0.7378\nEpoch 30/50\n49/49 [==============================] - 52s 1s/step - loss: 0.4848 - accuracy: 0.7613 - val_loss: 0.5121 - val_accuracy: 0.7517\nEpoch 31/50\n49/49 [==============================] - 52s 1s/step - loss: 0.4876 - accuracy: 0.7663 - val_loss: 0.5178 - val_accuracy: 0.7318\nEpoch 32/50\n49/49 [==============================] - 52s 1s/step - loss: 0.4801 - accuracy: 0.7662 - val_loss: 0.5128 - val_accuracy: 0.7305\nEpoch 33/50\n49/49 [==============================] - 52s 1s/step - loss: 0.4691 - accuracy: 0.7630 - val_loss: 0.5530 - val_accuracy: 0.7041\nEpoch 34/50\n49/49 [==============================] - 52s 1s/step - loss: 0.4759 - accuracy: 0.7710 - val_loss: 0.5187 - val_accuracy: 0.7477\nEpoch 35/50\n49/49 [==============================] - 52s 1s/step - loss: 0.4851 - accuracy: 0.7648 - val_loss: 0.5093 - val_accuracy: 0.7490\nEpoch 36/50\n49/49 [==============================] - 52s 1s/step - loss: 0.4757 - accuracy: 0.7641 - val_loss: 0.5361 - val_accuracy: 0.7160\nEpoch 37/50\n49/49 [==============================] - 52s 1s/step - loss: 0.4543 - accuracy: 0.7840 - val_loss: 0.5049 - val_accuracy: 0.7391\nEpoch 38/50\n49/49 [==============================] - 52s 1s/step - loss: 0.4424 - accuracy: 0.7884 - val_loss: 0.5148 - val_accuracy: 0.7312\nEpoch 39/50\n49/49 [==============================] - 52s 1s/step - loss: 0.4300 - accuracy: 0.7969 - val_loss: 0.5124 - val_accuracy: 0.7497\nEpoch 40/50\n49/49 [==============================] - 52s 1s/step - loss: 0.4464 - accuracy: 0.7813 - val_loss: 0.5421 - val_accuracy: 0.7180\nEpoch 41/50\n49/49 [==============================] - 52s 1s/step - loss: 0.4328 - accuracy: 0.7979 - val_loss: 0.5004 - val_accuracy: 0.7404\nEpoch 42/50\n49/49 [==============================] - 52s 1s/step - loss: 0.4204 - accuracy: 0.8058 - val_loss: 0.5019 - val_accuracy: 0.7444\nEpoch 43/50\n49/49 [==============================] - 52s 1s/step - loss: 0.4444 - accuracy: 0.7858 - val_loss: 0.5059 - val_accuracy: 0.7345\nEpoch 44/50\n49/49 
[==============================] - 52s 1s/step - loss: 0.4256 - accuracy: 0.7995 - val_loss: 0.5043 - val_accuracy: 0.7424\nEpoch 45/50\n49/49 [==============================] - 52s 1s/step - loss: 0.4416 - accuracy: 0.7914 - val_loss: 0.4974 - val_accuracy: 0.7483\nEpoch 46/50\n49/49 [==============================] - 52s 1s/step - loss: 0.4122 - accuracy: 0.8068 - val_loss: 0.5314 - val_accuracy: 0.7299\nEpoch 47/50\n49/49 [==============================] - 52s 1s/step - loss: 0.4000 - accuracy: 0.8183 - val_loss: 0.4986 - val_accuracy: 0.7483\nEpoch 48/50\n49/49 [==============================] - 52s 1s/step - loss: 0.4128 - accuracy: 0.8035 - val_loss: 0.5397 - val_accuracy: 0.7246\nEpoch 49/50\n49/49 [==============================] - 52s 1s/step - loss: 0.4124 - accuracy: 0.8119 - val_loss: 0.5078 - val_accuracy: 0.7483\nEpoch 50/50\n49/49 [==============================] - 52s 1s/step - loss: 0.3989 - accuracy: 0.8100 - val_loss: 0.5427 - val_accuracy: 0.7358\n0.8321539693813269\n" ], [ "#model1D_doubleip.save('/content/drive/MyDrive/ML_Data/model1D_doubleip.h5')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a7adb3f2bc27b6f5ede9508d18c1fd21838234f
5,515
ipynb
Jupyter Notebook
hands-on/020_mnist_data_exploration_lazy.ipynb
Gjacquenot/Python-for-machine-learning
7beb25bac1150ba0da285aa7afe2d6f1b6d080b9
[ "CC-BY-4.0" ]
5
2020-03-24T15:08:34.000Z
2022-01-13T08:33:54.000Z
hands-on/020_mnist_data_exploration_lazy.ipynb
Gjacquenot/Python-for-machine-learning
7beb25bac1150ba0da285aa7afe2d6f1b6d080b9
[ "CC-BY-4.0" ]
1
2020-03-25T07:53:10.000Z
2020-03-25T09:51:46.000Z
hands-on/020_mnist_data_exploration_lazy.ipynb
Gjacquenot/Python-for-machine-learning
7beb25bac1150ba0da285aa7afe2d6f1b6d080b9
[ "CC-BY-4.0" ]
9
2020-03-25T06:52:42.000Z
2022-02-21T14:13:20.000Z
22.695473
197
0.560109
[ [ [ "# MNIST: learning to recognize handwritten digits", "_____no_output_____" ], [ "## Dataset exploration", "_____no_output_____" ], [ "Before starting a machine learning or data science task, it is always useful to familiarize yourself with the data set and its context.", "_____no_output_____" ], [ "### Required imports", "_____no_output_____" ] ], [ [ "from collections import Counter\nfrom keras.datasets import mnist\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport numpy as np", "_____no_output_____" ] ], [ [ "### Obtaining the dataset", "_____no_output_____" ], [ "In Keras' datasets module we have a handle to the MNIST dataset we want to use in this notebook. Download the training and test set for this data.", "_____no_output_____" ] ], [ [ "(x_train, y_train), (x_test, y_test) = mnist.load_data()", "_____no_output_____" ] ], [ [ "### Dimensions and types", "_____no_output_____" ], [ "Determine the shape and type of the training and the test set.", "_____no_output_____" ] ], [ [ "x_train.shape, x_train.dtype, y_train.shape, y_train.dtype", "_____no_output_____" ], [ "x_test.shape, x_test.dtype, y_test.shape, y_test.dtype", "_____no_output_____" ] ], [ [ "The training set has 60,000 examples, the test set 10,000. The input is a 28 $\\times$ 28 matrix of unsigned 8-bit integers, the output a single unsigned 8-bit integer.", "_____no_output_____" ], [ "### Data semantics", "_____no_output_____" ], [ " Each input represents a scanned grayscale image of a handwritten digit, the output is the corresponding integer.", "_____no_output_____" ] ], [ [ "frame = plt.gca()\nframe.axes.get_xaxis().set_visible(False)\nframe.axes.get_yaxis().set_visible(False)\nplt.imshow(x_train[0], cmap='gray');", "_____no_output_____" ], [ "y_train[0]", "_____no_output_____" ], [ "rows = 5\ncols = 7\nfigure, axes = plt.subplots(rows, cols, figsize=(5, 3))\nplt.subplots_adjust(wspace=0.1, hspace=0.1)\nfor img_nr in range(rows*cols):\n row = img_nr//cols\n col = img_nr % cols\n axes[row, col].get_xaxis().set_visible(False)\n axes[row, col].get_yaxis().set_visible(False)\n axes[row, col].imshow(x_train[img_nr], cmap='gray')", "_____no_output_____" ], [ "y_train[:rows*cols].reshape(rows, cols)", "_____no_output_____" ] ], [ [ "So this proves that I'm certainly not the only one cursed with bad handwriting.", "_____no_output_____" ], [ "### Data distribution", "_____no_output_____" ], [ "An important question is whether all digits are represented in the training and test set, and what the distribution is. This may have an impact on the accuracy of the trained model.", "_____no_output_____" ] ], [ [ "distr = Counter(y_train)\nfigure, axes = plt.subplots()\naxes.set_xticks(list(distr.keys()))\naxes.bar(list(distr.keys()), list(distr.values()));", "_____no_output_____" ], [ "distr = Counter(y_test)\nfigure, axes = plt.subplots()\naxes.set_xticks(list(distr.keys()))\naxes.bar(list(distr.keys()), list(distr.values()));", "_____no_output_____" ] ], [ [ "Although some digits like 1 are overrepresented, and others, e.g., 5 are underrepresented, the distribution seems to be reasonably uniform, and it is likely no special care needs to be taken.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ] ]
4a7ade0c51dca80ca0e8c15a128c76b377457420
1,000,991
ipynb
Jupyter Notebook
notebooks/test_heatmap.ipynb
andrewRowlinson/mplsoccer-viz-tests
d6537749747ca99decadb13017a093d15eb52453
[ "MIT" ]
2
2021-05-21T23:25:54.000Z
2021-06-03T13:09:40.000Z
notebooks/test_heatmap.ipynb
andrewRowlinson/mplsoccer-viz-tests
d6537749747ca99decadb13017a093d15eb52453
[ "MIT" ]
null
null
null
notebooks/test_heatmap.ipynb
andrewRowlinson/mplsoccer-viz-tests
d6537749747ca99decadb13017a093d15eb52453
[ "MIT" ]
null
null
null
1,651.80033
35,028
0.959689
[ [ [ "from mplsoccer import Pitch, VerticalPitch\nfrom mplsoccer.dimensions import valid, size_varies\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nnp.random.seed(42)", "_____no_output_____" ] ], [ [ "# Test five points are same in both orientations", "_____no_output_____" ] ], [ [ "for pitch_type in valid:\n if pitch_type in size_varies:\n kwargs = {'pitch_length': 105, 'pitch_width': 68}\n else:\n kwargs = {}\n pitch = Pitch(pitch_type=pitch_type, line_zorder=2, **kwargs)\n pitch_vertical = VerticalPitch(pitch_type=pitch_type, line_zorder=2, **kwargs)\n fig, ax = plt.subplots(ncols=2, figsize=(12, 7))\n fig.suptitle(pitch_type)\n \n x = np.random.uniform(low=pitch.dim.pitch_extent[0], high=pitch.dim.pitch_extent[1], size=5)\n y = np.random.uniform(low=pitch.dim.pitch_extent[2], high=pitch.dim.pitch_extent[3], size=5)\n \n pitch.draw(ax[0])\n pitch.scatter(x, y, ax=ax[0], color='red', zorder=3)\n stats = pitch.bin_statistic(x, y)\n stats['statistic'][stats['statistic'] == 0] = np.nan\n hm = pitch.heatmap(stats, ax=ax[0])\n txt = pitch.label_heatmap(stats, color='white', ax=ax[0])\n \n pitch_vertical.draw(ax[1])\n pitch_vertical.scatter(x, y, ax=ax[1], color='red', zorder=3)\n stats_vertical = pitch_vertical.bin_statistic(x, y)\n stats_vertical['statistic'][stats_vertical['statistic'] == 0] = np.nan\n hm_vertical = pitch_vertical.heatmap(stats_vertical, ax=ax[1])\n txt_vertical = pitch_vertical.label_heatmap(stats, color='white', ax=ax[1])", "_____no_output_____" ] ], [ [ "# Test five points are same in both orientations - positional", "_____no_output_____" ] ], [ [ "for pitch_type in valid:\n if pitch_type in size_varies:\n kwargs = {'pitch_length': 105, 'pitch_width': 68}\n else:\n kwargs = {}\n pitch = Pitch(pitch_type=pitch_type, line_zorder=2, **kwargs)\n pitch_vertical = VerticalPitch(pitch_type=pitch_type, line_zorder=2, **kwargs)\n fig, ax = plt.subplots(ncols=2, figsize=(12, 7))\n fig.suptitle(pitch_type)\n \n x = np.random.uniform(low=pitch.dim.pitch_extent[0], high=pitch.dim.pitch_extent[1], size=5)\n y = np.random.uniform(low=pitch.dim.pitch_extent[2], high=pitch.dim.pitch_extent[3], size=5)\n \n pitch.draw(ax[0])\n pitch.scatter(x, y, ax=ax[0], color='red', zorder=3)\n stats = pitch.bin_statistic_positional(x, y)\n hm = pitch.heatmap_positional(stats, ax=ax[0])\n txt = pitch.label_heatmap(stats, color='white', ax=ax[0])\n \n pitch_vertical.draw(ax[1])\n pitch_vertical.scatter(x, y, ax=ax[1], color='red', zorder=3)\n stats_vertical = pitch_vertical.bin_statistic_positional(x, y)\n hm_vertical = pitch_vertical.heatmap_positional(stats_vertical, ax=ax[1])\n txt_vertical = pitch_vertical.label_heatmap(stats, color='white', ax=ax[1])", "_____no_output_____" ] ], [ [ "# Test edges - positional x", "_____no_output_____" ] ], [ [ "for pitch_type in valid:\n if pitch_type in size_varies:\n kwargs = {'pitch_length': 105, 'pitch_width': 68}\n else:\n kwargs = {}\n pitch = Pitch(pitch_type=pitch_type, line_zorder=2, pitch_color='None', axis=True, label=True, **kwargs)\n pitch_vertical = VerticalPitch(pitch_type=pitch_type, line_zorder=2, pitch_color='None', axis=True, label=True, **kwargs)\n fig, ax = plt.subplots(ncols=2, figsize=(12, 7))\n fig.suptitle(pitch_type)\n \n x = pitch.dim.positional_x\n y = np.random.uniform(low=pitch.dim.pitch_extent[2], high=pitch.dim.pitch_extent[3], size=x.size)\n \n pitch.draw(ax[0])\n pitch.scatter(x, y, ax=ax[0], color='red', zorder=3)\n stats = pitch.bin_statistic_positional(x, y)\n hm = pitch.heatmap_positional(stats, 
ax=ax[0], edgecolors='yellow')\n txt = pitch.label_heatmap(stats, color='white', ax=ax[0])\n \n pitch_vertical.draw(ax[1])\n pitch_vertical.scatter(x, y, ax=ax[1], color='red', zorder=3)\n stats_vertical = pitch_vertical.bin_statistic_positional(x, y)\n hm_vertical = pitch_vertical.heatmap_positional(stats_vertical, ax=ax[1], edgecolors='yellow')\n txt_vertical = pitch_vertical.label_heatmap(stats_vertical, color='white', ax=ax[1])", "_____no_output_____" ] ], [ [ "# Test edges - positional y", "_____no_output_____" ] ], [ [ "for pitch_type in valid:\n if pitch_type in size_varies:\n kwargs = {'pitch_length': 105, 'pitch_width': 68}\n else:\n kwargs = {}\n pitch = Pitch(pitch_type=pitch_type, line_zorder=2, pitch_color='None', axis=True, label=True, **kwargs)\n pitch_vertical = VerticalPitch(pitch_type=pitch_type, line_zorder=2, pitch_color='None', axis=True, label=True, **kwargs)\n fig, ax = plt.subplots(ncols=2, figsize=(12, 7))\n fig.suptitle(pitch_type)\n \n y = pitch.dim.positional_y\n x = np.random.uniform(low=pitch.dim.pitch_extent[0], high=pitch.dim.pitch_extent[1], size=y.size)\n\n pitch.draw(ax[0])\n pitch.scatter(x, y, ax=ax[0], color='red', zorder=3)\n stats = pitch.bin_statistic_positional(x, y)\n hm = pitch.heatmap_positional(stats, ax=ax[0], edgecolors='yellow')\n txt = pitch.label_heatmap(stats, color='white', ax=ax[0])\n \n pitch_vertical.draw(ax[1])\n pitch_vertical.scatter(x, y, ax=ax[1], color='red', zorder=3)\n stats_vertical = pitch_vertical.bin_statistic_positional(x, y)\n hm_vertical = pitch_vertical.heatmap_positional(stats_vertical, ax=ax[1], edgecolors='yellow')\n txt_vertical = pitch_vertical.label_heatmap(stats_vertical, color='white', ax=ax[1])", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a7aeb72d0b251d6e083c126add96f678da8d540
15,818
ipynb
Jupyter Notebook
Bones_4_OuterCV-Pipelines-ACC.ipynb
muntisa/Goat-Bones-Machine-Learning
4abf370cf8479a915fa8c8144b9e98a074fd2fb9
[ "MIT" ]
4
2019-05-28T09:29:49.000Z
2019-10-10T11:40:30.000Z
Bones_4_OuterCV-Pipelines-ACC.ipynb
muntisa/Goat-Bones-Machine-Learning
4abf370cf8479a915fa8c8144b9e98a074fd2fb9
[ "MIT" ]
null
null
null
Bones_4_OuterCV-Pipelines-ACC.ipynb
muntisa/Goat-Bones-Machine-Learning
4abf370cf8479a915fa8c8144b9e98a074fd2fb9
[ "MIT" ]
null
null
null
29.677298
154
0.53123
[ [ [ "# Pipelines for classifiers using Balanced Accuracy\n\nFor each dataset, classifier and folds:\n- Robust scaling\n- 2, 3, 5, 10-fold outer CV\n- balanced accuracy as score\n\nWe will use folders *datasets2* and *results2*.", "_____no_output_____" ] ], [ [ "%reload_ext autoreload\n%autoreload 2\n%matplotlib inline\n\n# remove warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning) ", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nimport time\nimport matplotlib.pyplot as plt\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import cross_val_score, GridSearchCV, StratifiedKFold, LeaveOneOut\nfrom sklearn.metrics import confusion_matrix,accuracy_score, roc_auc_score,f1_score, recall_score, precision_score\nfrom sklearn.utils import class_weight\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import LogisticRegression, LassoCV\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier\nfrom xgboost import XGBClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.gaussian_process.kernels import RBF\nfrom sklearn.svm import LinearSVC\n\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler\nfrom sklearn.feature_selection import RFECV, VarianceThreshold, SelectKBest, chi2\nfrom sklearn.feature_selection import SelectFromModel, SelectPercentile, f_classif\n\nimport os", "_____no_output_____" ], [ "!ls ./datasets2/*", "_____no_output_____" ], [ "!ls ./results2/*", "_____no_output_____" ], [ "# get list of files in datasets2 = all datasets\ndsList = os.listdir('./datasets2')\nprint('--> Found', len(dsList), 'dataset files')", "_____no_output_____" ], [ "# create a list with all output variable names \noutVars = []\nfor eachdsFile in dsList:\n outVars.append( (eachdsFile[:-4])[3:] )", "_____no_output_____" ] ], [ [ "### Define script parameters", "_____no_output_____" ] ], [ [ "# define list of folds\nfoldTypes = [2,3,5,10]\n\n# define a label for output files\ntargetName = '_Outer'\n\nseed = 42", "_____no_output_____" ] ], [ [ "### Function definitions", "_____no_output_____" ] ], [ [ "def set_weights(y_data, option='balanced'):\n \"\"\"Estimate class weights for unbalanced dataset\n If ‘balanced’, class weights will be given by n_samples / (n_classes * np.bincount(y)). \n If a dictionary is given, keys are classes and values are corresponding class weights. 
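For example, a 5:1 imbalanced binary target gives roughly {0: 0.6, 1: 3.0} (illustrative values only, not from this dataset).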
\n If None is given, the class weights will be uniform \"\"\"\n cw = class_weight.compute_class_weight(option, np.unique(y_data), y_data)\n w = {i:j for i,j in zip(np.unique(y_data), cw)}\n return w ", "_____no_output_____" ], [ "def getDataFromDataset(sFile, OutVar):\n # read details file\n print('\\n-> Read dataset', sFile)\n df = pd.read_csv(sFile)\n #df = feather.read_dataframe(sFile)\n print('Shape', df.shape)\n # print(list(df.columns))\n \n # select X and Y\n ds_y = df[OutVar]\n ds_X = df.drop(OutVar,axis = 1)\n Xdata = ds_X.values # get values of features\n Ydata = ds_y.values # get output values\n\n print('Shape X data:', Xdata.shape)\n print('Shape Y data:',Ydata.shape)\n \n # return data for X and Y, feature names as list\n return (Xdata, Ydata, list(ds_X.columns))", "_____no_output_____" ], [ "def Pipeline_OuterCV(Xdata, Ydata, label = 'my', class_weights = {0: 1, 1: 1}, folds = 3, seed = 42):\n # inputs:\n # data for X, Y; a label about data, number of folds, seed\n # default: 3-fold CV\n \n # define classifiers\n names = ['KNN', 'SVM linear', 'SVM', 'LR', 'DT', 'RF', 'XGB']\n classifiers = [KNeighborsClassifier(3),\n SVC(kernel=\"linear\",random_state=seed,gamma='scale'),\n SVC(kernel = 'rbf', random_state=seed,gamma='auto'),\n LogisticRegression(solver='lbfgs',random_state=seed),\n DecisionTreeClassifier(random_state = seed),\n RandomForestClassifier(n_estimators=50,n_jobs=-1,random_state=seed),\n XGBClassifier(n_jobs=-1,seed=seed)\n ]\n # results dataframe: each column for a classifier\n df_res = pd.DataFrame(columns=names)\n\n # build each classifier\n print('* Building scaling+feature selection+outer '+str(folds)+'-fold CV for '+str(len(names))+' classifiers:', str(names))\n total = time.time()\n \n # define a fold-CV for all the classifier\n outer_cv = StratifiedKFold(n_splits=folds,shuffle=True,random_state=seed)\n \n # use each ML\n for name, clf in zip(names, classifiers):\n start = time.time()\n \n # create pipeline: scaler + classifier\n estimators = []\n \n # SCALER\n estimators.append(('Scaler', RobustScaler() ))\n \n # add Classifier\n estimators.append(('Classifier', clf)) \n \n # create pipeline\n model = Pipeline(estimators)\n \n # evaluate pipeline\n scores = cross_val_score(model, Xdata, Ydata, cv=outer_cv, scoring='balanced_accuracy', n_jobs=-1)\n df_res[name] = scores\n print('%s, MeanScore=%0.2f, Time:%0.1f mins' % (name, scores.mean(), (time.time() - start)/60))\n \n # save results\n resFile = './results2/'+str(label)+str(targetName)+'_Outer-'+str(folds)+'-foldCV.csv'\n df_res.to_csv(resFile, index=False)\n print('* Scores saved', resFile) \n print('Total time:', (time.time() - total)/60, ' mins') \n \n # return scores for all classifiers as dataframe (each column a classifier)\n return df_res", "_____no_output_____" ] ], [ [ "### Calculations", "_____no_output_____" ] ], [ [ "df_results = None # all results \n\n# apply MLs to each data\nfor OutVar in outVars:\n sFile = './datasets2/ds.'+str(OutVar)+'.csv'\n\n # get data from file\n Xdata, Ydata, Features = getDataFromDataset(sFile,OutVar)\n\n # Calculate class weights\n class_weights = set_weights(Ydata)\n print(\"Class weights = \", class_weights)\n \n # try different folds for each subset -> box plots\n for folds in foldTypes:\n \n # calculate outer CV for different binary classifiers\n df_fold = Pipeline_OuterCV(Xdata, Ydata, label = OutVar, class_weights = class_weights, folds = folds, seed = seed)\n df_fold['Dataset'] = OutVar\n df_fold['folds'] = folds\n \n # add each result to a summary 
dataframe\n df_results = pd.concat([df_results,df_fold])", "_____no_output_____" ], [ "# save the results to file\n\nresFile = './results2/'+'ML_Outer-n-foldCV.csv'\ndf_results.to_csv(resFile, index=False)", "_____no_output_____" ] ], [ [ "### Mean scores", "_____no_output_____" ] ], [ [ "# calculate means of ACC scores for each ML\ndf_means =df_results.groupby(['Dataset','folds'], as_index = False).mean()[['Dataset', 'folds','KNN', 'SVM linear', 'SVM', 'LR', 'DT', 'RF', 'XGB']]", "_____no_output_____" ], [ "# save averaged values\nresFile_means = './results2/'+'ML_Outer-n-foldCV_means.csv'\ndf_means.to_csv(resFile_means, index=False)", "_____no_output_____" ] ], [ [ "### Best ML results", "_____no_output_____" ] ], [ [ "# find the maximum value rows for all MLs\nbestMLs = df_means[['KNN', 'SVM linear', 'SVM', 'LR', 'DT', 'RF', 'XGB']].idxmax()\nprint(bestMLs)", "_____no_output_____" ], [ "# get the best score by ML method\nfor ML in ['KNN', 'SVM linear', 'SVM', 'LR', 'DT', 'RF', 'XGB']:\n print(ML, '\\t', list(df_means.iloc[df_means[ML].idxmax()][['Dataset', 'folds', ML]]))", "_____no_output_____" ], [ "# Add a new column with the original output name (get first 2 characters from Dataset column)\ngetOutOrig = []\nfor each in df_means['Dataset']:\n getOutOrig.append(each[:2])\ndf_means['Output'] = getOutOrig\ndf_means", "_____no_output_____" ], [ "# save new results including extra column with output variable name\nresFile_means2 = './results2/'+'ML_Outer-n-foldCV_means2.csv'\ndf_means.to_csv(resFile_means2, index=False)", "_____no_output_____" ] ], [ [ "### Get the best ML for each type of output\n\nWe are checking all 2, 3, 5, 10-fold CV results:", "_____no_output_____" ] ], [ [ "for outName in list(set(df_means['Output'])):\n print('*********************')\n print('OUTPUT =', outName)\n df_sel = df_means[df_means['Output'] == outName].copy()\n for ML in ['KNN', 'SVM linear', 'SVM', 'LR', 'DT', 'RF', 'XGB']:\n print(ML, '\\t', list(df_sel.loc[df_sel[ML].idxmax(),:][['Dataset', 'folds', ML]]))", "_____no_output_____" ], [ "df_sel.loc[df_sel[ML].idxmax(),:]", "_____no_output_____" ] ], [ [ "### Get the best ML for each type of output for 10-fold CV", "_____no_output_____" ] ], [ [ "df_10fold = df_means[df_means['folds']==10].copy()\ndf_10fold.head()", "_____no_output_____" ], [ "for outName in list(set(df_10fold['Output'])):\n print('*********************')\n print('OUTPUT =', outName)\n \n df_sel = df_10fold[df_10fold['Output'] == outName].copy()\n print('MAX =',df_sel[['KNN', 'SVM linear', 'SVM', 'LR', 'DT', 'RF', 'XGB']].max().max())\n \n for ML in ['KNN', 'SVM linear', 'SVM', 'LR', 'DT', 'RF', 'XGB']:\n print(ML, '\\t', list(df_sel.loc[df_sel[ML].idxmax(),:][['Dataset', 'folds', ML]]))", "_____no_output_____" ] ], [ [ "### Get the best ML for each type of output for 5-fold CV", "_____no_output_____" ] ], [ [ "df_5fold = df_means[df_means['folds']==5].copy()\ndf_5fold.head()", "_____no_output_____" ], [ "for outName in list(set(df_5fold['Output'])):\n print('*********************')\n print('OUTPUT =', outName)\n \n df_sel = df_5fold[df_5fold['Output'] == outName].copy()\n print('MAX =',df_sel[['KNN', 'SVM linear', 'SVM', 'LR', 'DT', 'RF', 'XGB']].max().max())\n \n for ML in ['KNN', 'SVM linear', 'SVM', 'LR', 'DT', 'RF', 'XGB']:\n print(ML, '\\t', list(df_sel.loc[df_sel[ML].idxmax(),:][['Dataset', 'folds', ML]]))", "_____no_output_____" ] ], [ [ "Get only the best values from all MLs for 5- and 10-fold CV:", "_____no_output_____" ] ], [ [ "print('5-fold CV')\nfor outName in 
list(set(df_5fold['Output'])):\n df_sel = df_5fold[df_5fold['Output'] == outName].copy()\n print(outName,df_sel[['KNN', 'SVM linear', 'SVM', 'LR', 'DT', 'RF', 'XGB']].max().max())\n \nprint('10-fold CV')\nfor outName in list(set(df_10fold['Output'])):\n df_sel = df_10fold[df_10fold['Output'] == outName].copy()\n print(outName,df_sel[['KNN', 'SVM linear', 'SVM', 'LR', 'DT', 'RF', 'XGB']].max().max())", "_____no_output_____" ] ], [ [ "**Conclusion**: even with **5,10-CV** we are able to obtain classification models with **ACC > 0.70** and in one case with **ACC > 0.81**.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a7aed786f46631cd76acd428533c46ced93b12a
41,112
ipynb
Jupyter Notebook
Chap-10-Section-10-7-sphere.ipynb
Spationaute/GCH200
55144f5b2a59a7240d36c985997387f5036149f7
[ "MIT" ]
1
2018-02-26T16:29:58.000Z
2018-02-26T16:29:58.000Z
Chap-10-Section-10-7-sphere.ipynb
Spationaute/GCH200
55144f5b2a59a7240d36c985997387f5036149f7
[ "MIT" ]
null
null
null
Chap-10-Section-10-7-sphere.ipynb
Spationaute/GCH200
55144f5b2a59a7240d36c985997387f5036149f7
[ "MIT" ]
2
2018-02-27T15:04:33.000Z
2021-06-03T16:38:07.000Z
183.535714
32,770
0.887113
[ [ [ "|<img style=\"float:left;\" src=\"http://pierreproulx.espaceweb.usherbrooke.ca/images/usherb_transp.gif\" > |Pierre Proulx, ing, professeur|\n|:---|:---|\n|Département de génie chimique et de génie biotechnologique |** GCH200-Phénomènes d'échanges I **|\n", "_____no_output_____" ], [ "### Section 10.6, Heat conduction in a sphere\n", "_____no_output_____" ] ], [ [ "#\n# Pierre Proulx\n#\n# Set up the display and the symbolic computation tools\n#\nimport sympy as sp\nfrom IPython.display import *\nsp.init_printing(use_latex=True)\n%matplotlib inline", "_____no_output_____" ], [ "# Parameters, variables and functions\nr,k01,k12,k23,h0,h3=sp.symbols('r k_1 k_2 k_3 h_0 h_3') \nr0,r1,r2,r3,Ta,Tb=sp.symbols('r_0 r_1 r_2 r_3 T_a T_b') \nq=sp.symbols('q')\nT=sp.Function('T')(r) ", "_____no_output_____" ], [ "eq1=sp.Eq(k01/r**2*sp.Derivative(r**2*sp.Derivative(T,r)),0)\neq2=sp.Eq(k12/r**2*sp.Derivative(r**2*sp.Derivative(T,r)),0)\neq3=sp.Eq(k23/r**2*sp.Derivative(r**2*sp.Derivative(T,r)),0)\nT1=sp.dsolve(eq1).rhs\nT2=sp.dsolve(eq2)\nT2=T2.subs(sp.symbols('C1'),sp.symbols('C3'))\nT2=T2.subs(sp.symbols('C2'),sp.symbols('C4')).rhs\nT3=sp.dsolve(eq3)\nT3=T3.subs(sp.symbols('C1'),sp.symbols('C5'))\nT3=T3.subs(sp.symbols('C2'),sp.symbols('C6')).rhs\ndisplay(T1)\ndisplay(T2)\ndisplay(T3)", "_____no_output_____" ], [ "# Now apply the boundary conditions to find the 6 constants\ncl1=sp.Eq(T1.subs(r,r1)-T2.subs(r,r1)) # equal temperatures at the interior interfaces\ncl2=sp.Eq(T2.subs(r,r2)-T3.subs(r,r2))\n# equal fluxes at the interior interfaces\ncl3=sp.Eq(k01*T1.diff(r).subs(r,r1)-k12*T2.diff(r).subs(r,r1))\ncl4=sp.Eq(k12*T2.diff(r).subs(r,r2)-k23*T3.diff(r).subs(r,r2))\n# fluxes given by Newton's law of cooling at the walls\ncl5=sp.Eq(-k01*T1.diff(r).subs(r,r0)+h0*(T1.subs(r,r0)-Ta))\ncl6=sp.Eq(-k23*T3.diff(r).subs(r,r3)+h3*(Tb-T3.subs(r,r3)))", "_____no_output_____" ], [ "constantes=sp.solve((cl1,cl2,cl3,cl4,cl5,cl6),sp.symbols('C1 C2 C3 C4 C5 C6'))\nT1=T1.subs(constantes)\nT2=T2.subs(constantes)\nT3=T3.subs(constantes)\ndico={'k_1':4,'k_2':25,'k_3':1,\n 'h_0':100,'h_3':20,'r_0':0.020,'r_1':0.025,'r_2':0.026,'r_3':0.035,'T_a':100,'T_b':20}\nT1p=T1.subs(dico)\nT2p=T2.subs(dico)\nT3p=T3.subs(dico)\n# Compute the heat rates at 0 and 3 (they must be equal) (watts / metre of length)\n#\ntaux3=(h3*(T3-Tb)*2*sp.pi*r3).subs(dico) # to put the numerical values into\ntaux0=(h0*(Ta-T1)*2*sp.pi*r0).subs(dico) # the symbolic expression, use subs(dico)\n#\n#\nprint(taux3.subs(r,r3.subs(dico)), taux0.subs(r,r0.subs(dico))) \nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = 12, 10\n#plt.figure(figsize=(12,10))\np=sp.plot((T1p,(r,r0.subs(dico),r1.subs(dico)))\n ,(T2p,(r,r1.subs(dico),r2.subs(dico)))\n ,(T3p,(r,r2.subs(dico),r3.subs(dico)))\n ,legend=True,ylabel='T(r)',xlabel='r',show=False) # do not display yet\np[0].line_color = 'red'\np[0].label='from r = r_0 to r=r_1 '\np[1].line_color = 'black'\np[1].label='from r = r_1 to r=r_2 '\np[2].line_color = 'green'\np[2].label='from r = r_2 to r=r_3 '\np.show() # now we are ready to display", "58.4073955957775*pi 102.212942292611*pi\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ] ]
4a7af830226dc8865cba8dce6cbae0d13ce2cb89
185,369
ipynb
Jupyter Notebook
examples/seismic/tutorials/15_tti_qp_pure.ipynb
wszhang/devito
a7dbdabe505ded73781ca06e0a1c40b4d582655d
[ "MIT" ]
null
null
null
examples/seismic/tutorials/15_tti_qp_pure.ipynb
wszhang/devito
a7dbdabe505ded73781ca06e0a1c40b4d582655d
[ "MIT" ]
70
2020-07-16T05:57:32.000Z
2022-03-21T04:06:35.000Z
examples/seismic/tutorials/15_tti_qp_pure.ipynb
ofmla/devito
9f1495dc521386f1fae96de5d21d15e05c24df99
[ "MIT" ]
null
null
null
217.059719
150,912
0.891174
[ [ [ "# TTI pure qP-wave equation implementation", "_____no_output_____" ], [ "The aim of this notebook is to show how to solve the pure qP-wave equation using the finite-difference (FD) scheme. The 2D TTI pure qP-wave equation can be written as ([Mu et al., 2020](https://library.seg.org/doi/10.1190/geo2019-0320.1))\n\n$$\\begin{align}\n\\frac{1}{v_{p}^{2}}\\frac{\\partial^{2}p(\\textbf{x},t)}{\\partial t^{2}} = & \\,\\, (1+2\\delta\\sin^{2}\\theta\\cos^{2}\\theta + 2\\epsilon\\cos^{4}\\theta)\\frac{\\partial^{4}q(\\textbf{x},t)}{\\partial x^{4}} \\\\\n& + (1+2\\delta\\sin^{2}\\theta\\cos^{2}\\theta + 2\\epsilon\\sin^{4}\\theta)\\frac{\\partial^{4}q(\\textbf{x},t)}{\\partial z^{4}} \\\\\n& + (2 - \\delta\\sin^{2}2\\theta+3\\epsilon\\sin^{2}2\\theta+2\\delta\\cos^{2}\\theta)\\frac{\\partial^{4}q(\\textbf{x},t)}{\\partial x^{2}\\partial z^{2}} \\\\\n& +(\\delta\\sin4\\theta-4\\epsilon\\sin2\\theta\\cos^{2}\\theta)\\frac{\\partial^4 q(\\textbf{x},t)}{\\partial x^{3}\\partial z} \\\\\n& +(-\\delta\\sin4\\theta-4\\epsilon\\sin2\\theta\\cos^{2}\\theta)\\frac{\\partial^4 q(\\textbf{x},t)}{\\partial x\\partial z^{3}} \\\\\n& + f(\\textbf{x}_{s},t),\n\\end{align}$$\n\n$$\n\\frac{\\partial^{2}q(\\textbf{x},t)}{\\partial x^{2}} + \\frac{\\partial^{2}q(\\textbf{x},t)}{\\partial z^{2}} = p(\\textbf{x},t),\n$$\n\nwhere $q(\\textbf{x},t)$ is an auxiliary wavefield, which is introduced for implementing the FD scheme.", "_____no_output_____" ], [ "First of all, it is necessary to import some Devito modules and other packages that will be used in the implementation. We set Devito logging `configuration['log-level'] = 'DEBUG'` to view all processing times (i.e. compilation and execution of `Operators`).", "_____no_output_____" ] ], [ [ "import numpy as np\n\nfrom devito import (Function, TimeFunction, cos, sin, solve,\n Eq, Operator, configuration, norm)\nfrom examples.seismic import TimeAxis\nfrom examples.seismic import RickerSource\nfrom examples.seismic import Receiver\nfrom examples.seismic import demo_model\nfrom matplotlib import pyplot as plt\n\n# Set logging to debug, captures statistics on the performance of operators\n#configuration['log-level'] = 'INFO'\nconfiguration['log-level']='DEBUG'", "_____no_output_____" ] ], [ [ "We will start with the definitions of the grid and the physical parameters $v_{p}, \\theta, \\epsilon, \\delta$. For simplicity, we don't use any absorbing boundary conditions. We use a homogeneous model. The model is discretized with a grid of $101 \\times 101$ and spacing of 10 m. The $v_{p}, \\epsilon, \\delta$ and $\\theta$ parameters of this model are 3600 m∕s, 0.23, 0.17, and 45°, respectively. ", "_____no_output_____" ] ], [ [ "# NBVAL_IGNORE_OUTPUT\n\ndtype = np.float32 # 32 bit floating point as the precision type\nspace_order = 8 \n\nshape = (101,101) # 101x101 grid\nspacing = (10.,10.) # spacing of 10 meters\norigin = (0.,0.) 
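\n# note: nbl = 0 below means no absorbing layer is padded around the grid, so waves\n# reflect at the domain edges; the simulation time tn is kept short later because of this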
\nnbl = 0 # number of pad points\n\nmodel = demo_model('constant-tti', spacing=spacing, space_order=8,\n shape=shape, nbl=nbl, dtype=dtype)\n\n# initialize Thomsen parameters to those used in Mu et al., (2020)\nmodel.update('vp', np.ones(shape)*3.6)\nmodel.update('epsilon', np.ones(shape)*0.23)\nmodel.update('delta', np.ones(shape)*0.17)\nmodel.update('theta', np.ones(shape)*(45.*(np.pi/180.)))", "Allocating memory for vp(117, 117)\nOperator `pad_vp` generated in 0.10 s\n * lowering.IET: 0.04 s (43.1 %)\n * specializing.IET: 0.02 s (21.6 %)\n * lowering.Clusters: 0.03 s (32.3 %)\n * specializing.Clusters: 0.02 s (21.6 %)\n * lowering.Expressions: 0.03 s (32.3 %)\nFlops reduction after symbolic optimization: [0 --> 0]\nOperator `pad_vp` fetched `/tmp/devito-jitcache-uid1001/eb0b4836d1cb5c11bc20c5ed0a1f10bf88a26987.c` in 0.07 s from jit-cache\nOperator `pad_vp` ran in 0.01 s\nPerformance[mode=advanced] arguments: {}\nAllocating memory for epsilon(117, 117)\nOperator `pad_epsilon` generated in 0.10 s\n * lowering.IET: 0.04 s (42.3 %)\n * specializing.IET: 0.02 s (21.2 %)\n * lowering.Clusters: 0.04 s (42.3 %)\n * specializing.Clusters: 0.02 s (21.2 %)\n * lowering.Expressions: 0.03 s (31.8 %)\nFlops reduction after symbolic optimization: [0 --> 0]\nOperator `pad_epsilon` fetched `/tmp/devito-jitcache-uid1001/5ade86409010323ebd717c48274e6de7214c63cd.c` in 0.04 s from jit-cache\nOperator `pad_epsilon` ran in 0.01 s\nPerformance[mode=advanced] arguments: {}\nAllocating memory for delta(117, 117)\nOperator `pad_delta` generated in 0.11 s\n * lowering.IET: 0.04 s (39.3 %)\n * lowering.Clusters: 0.04 s (39.3 %)\n * lowering.Expressions: 0.03 s (29.5 %)\nFlops reduction after symbolic optimization: [0 --> 0]\nOperator `pad_delta` fetched `/tmp/devito-jitcache-uid1001/fd1f464d5ebb87cfc9c48d75069221f17c342191.c` in 0.06 s from jit-cache\nOperator `pad_delta` ran in 0.01 s\nPerformance[mode=advanced] arguments: {}\nAllocating memory for theta(117, 117)\nOperator `pad_theta` generated in 0.11 s\n * lowering.IET: 0.04 s (38.8 %)\n * lowering.Clusters: 0.04 s (38.8 %)\n * lowering.Expressions: 0.03 s (29.1 %)\nFlops reduction after symbolic optimization: [0 --> 0]\nOperator `pad_theta` fetched `/tmp/devito-jitcache-uid1001/8e5b46910a68f6290d320d47ab24fbbf97232ca9.c` in 0.02 s from jit-cache\nOperator `pad_theta` ran in 0.01 s\nPerformance[mode=advanced] arguments: {}\n" ] ], [ [ "In the cell below, symbols used in the PDE definition are obtained from the `model` object. 
Note that Devito's own trigonometric functions are used.", "_____no_output_____" ] ], [ [ "# Get symbols from model\ntheta = model.theta\ndelta = model.delta\nepsilon = model.epsilon\nm = model.m\n\n# Use trigonometric functions from Devito\ncostheta = cos(theta)\nsintheta = sin(theta)\ncos2theta = cos(2*theta)\nsin2theta = sin(2*theta)\nsin4theta = sin(4*theta)", "_____no_output_____" ] ], [ [ "According to [Mu et al., (2020)](https://library.seg.org/doi/10.1190/geo2019-0320.1), the time sampling can be chosen as \n$$\n\\Delta t < \\frac{\\Delta d}{\\pi \\cdot (v_{p})_{max}}\\sqrt{\\dfrac{1}{(1+\\eta_{max}|\\cos\\theta-\\sin\\theta|_{max}^{2})}},\n$$\n\nwhere $\\eta_{max}$ denotes the maximum value between $|\\epsilon|_{max}$ and $|\\delta|_{max}$, and $|\\cos\\theta - \\sin\\theta|_{max}$ is the maximum value of $|\\cos\\theta - \\sin\\theta|$.", "_____no_output_____" ] ], [ [ "# NBVAL_IGNORE_OUTPUT\n\n# Values used to compute the time sampling\nepsilonmax = np.max(np.abs(epsilon.data[:]))\ndeltamax = np.max(np.abs(delta.data[:]))\netamax = max(epsilonmax, deltamax)\nvmax = model._max_vp\nmax_cos_sin = np.amax(np.abs(np.cos(theta.data[:]) - np.sin(theta.data[:])))\ndvalue = min(spacing)", "Allocating memory for n(1,)\n" ] ], [ [ "The next step is to define the simulation time. It has to be small enough to avoid reflections from the borders. Note we will use the `dt` computed below rather than the one provided by the property() function `critical_dt` in the `SeismicModel` class, as the latter only works for the coupled pseudoacoustic equation.", "_____no_output_____" ] ], [ [ "# Compute the dt and set time range\nt0 = 0. # Simulation time start\ntn = 160. # Simulation time end (0.16 second = 160 msec)\ndt = (dvalue/(np.pi*vmax))*np.sqrt(1/(1+etamax*(max_cos_sin)**2)) # eq. above (cell 3)\ntime_range = TimeAxis(start=t0,stop=tn,step=dt)\nprint(\"time_range; \", time_range)", "time_range; TimeAxis: start=0, stop=160.039, step=0.884194, num=182\n" ] ], [ [ "In exactly the same form as in the [Cavity flow with Navier-Stokes]() tutorial, we will use two operators, one for solving the Poisson equation in pseudotime and one for advancing in time. But unlike what was done in that tutorial, in this case, we write the FD solution of the Poisson equation manually, without using the `laplace` shortcut and `solve` functionality (just to break up the routine and try something different). The internal time loop can be controlled by supplying the number of pseudotime steps (`niter_poisson` iterations) as a `time` argument to the operator. A Ricker wavelet source with peak frequency of 20 Hz is located at the center of the model.", "_____no_output_____" ] ], [ [ "# NBVAL_IGNORE_OUTPUT\n\n# time stepping \np = TimeFunction(name=\"p\", grid=model.grid, time_order=2, space_order=2)\nq = Function(name=\"q\", grid=model.grid, space_order=8)\n\n# Main equations\nterm1_p = (1 + 2*delta*(sintheta**2)*(costheta**2) + 2*epsilon*costheta**4)*q.dx4\nterm2_p = (1 + 2*delta*(sintheta**2)*(costheta**2) + 2*epsilon*sintheta**4)*q.dy4\nterm3_p = (2-delta*(sin2theta)**2 + 3*epsilon*(sin2theta)**2 + 2*delta*(cos2theta)**2)*((q.dy2).dx2)\nterm4_p = ( delta*sin4theta - 4*epsilon*sin2theta*costheta**2)*((q.dy).dx3)\nterm5_p = (-delta*sin4theta - 4*epsilon*sin2theta*sintheta**2)*((q.dy3).dx)\n\nstencil_p = solve(m*p.dt2 - (term1_p + term2_p + term3_p + term4_p + term5_p), p.forward)\nupdate_p = Eq(p.forward, stencil_p)\n\n# Poisson eq. 
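# i.e. a simple Jacobi-style pseudo-time iteration 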
(following notebook 6 from CFD examples)\nb = Function(name='b', grid=model.grid, space_order=2)\npp = TimeFunction(name='pp', grid=model.grid, space_order=2)\n\n# Create stencil and boundary condition expressions\nx, z = model.grid.dimensions\nt = model.grid.stepping_dim\n\nupdate_q = Eq( pp[t+1,x,z],((pp[t,x+1,z] + pp[t,x-1,z])*z.spacing**2 + (pp[t,x,z+1] + pp[t,x,z-1])*x.spacing**2 -\n b[x,z]*x.spacing**2*z.spacing**2) / (2*(x.spacing**2 + z.spacing**2)))\n\nbc = [Eq(pp[t+1,x, 0], 0.)]\nbc += [Eq(pp[t+1,x, shape[1]+2*nbl-1], 0.)]\nbc += [Eq(pp[t+1,0, z], 0.)]\nbc += [Eq(pp[t+1,shape[0]-1+2*nbl, z], 0.)]\n\n# set source and receivers\nsrc = RickerSource(name='src',grid=model.grid,f0=0.02,npoint=1,time_range=time_range)\nsrc.coordinates.data[:,0] = model.domain_size[0]* .5\nsrc.coordinates.data[:,1] = model.domain_size[0]* .5\n# Define the source injection\nsrc_term = src.inject(field=p.forward,expr=src * dt**2 / m)\n\nrec = Receiver(name='rec',grid=model.grid,npoint=shape[0],time_range=time_range)\nrec.coordinates.data[:, 0] = np.linspace(model.origin[0],model.domain_size[0], num=model.shape[0])\nrec.coordinates.data[:, 1] = 2*spacing[1]\n# Create interpolation expression for receivers\nrec_term = rec.interpolate(expr=p.forward)\n\n# Operators\noptime=Operator([update_p] + src_term + rec_term)\noppres=Operator([update_q] + bc)\n\n# you can print the generated code for both operators by typing print(optime) and print(oppres)", "Allocating memory for src(182, 1)\nAllocating memory for src_coords(1, 2)\nAllocating memory for rec_coords(101, 2)\nOperator `Kernel` generated in 1.68 s\n * lowering.Expressions: 0.73 s (43.6 %)\n * lowering.Clusters: 0.70 s (41.8 %)\n * specializing.Clusters: 0.59 s (35.2 %)\n * cire: 0.37 s (22.1 %)\nFlops reduction after symbolic optimization: [1047 --> 155]\nOperator `Kernel` generated in 0.12 s\n * lowering.Clusters: 0.05 s (44.8 %)\n * specializing.Clusters: 0.03 s (26.9 %)\n * lowering.IET: 0.05 s (44.8 %)\n * specializing.IET: 0.03 s (26.9 %)\nFlops reduction after symbolic optimization: [17 --> 10]\n" ] ], [ [ "The time steps are advanced through a Python loop where both operators `optime` and `oppres` are called. Note the use of module indices to get proper buffers. As the operators will be applied at each step, the logging detail is expected to be too much. 
So, it is more convenient to switch from `DEBUG` to `INFO`", "_____no_output_____" ] ], [ [ "# NBVAL_IGNORE_OUTPUT\nconfiguration['log-level'] = 'INFO'\n\npsave =np.empty ((time_range.num,model.grid.shape[0],model.grid.shape[1]))\nniter_poisson = 1200\n\n# This is the time loop.\nfor step in range(0,time_range.num-2):\n q.data[:,:]=pp.data[(niter_poisson+1)%2,:,:]\n optime(time_m=step, time_M=step, dt=dt)\n pp.data[:,:]=0.\n b.data[:,:]=p.data[(step+1)%3,:,:]\n oppres(time_M = niter_poisson)\n psave[step,:,:]=p.data[(step+1)%3,:,:]", "Operator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.03 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator 
`Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 
s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\nOperator `Kernel` ran in 0.01 s\n" ], [ "# Some useful definitions for plotting if nbl is set to any other value than zero\nnxpad,nzpad = shape[0] + 2 * nbl, shape[1] + 2 * nbl\nshape_pad = np.array(shape) + 2 * nbl\norigin_pad = tuple([o - s*nbl for o, s in zip(origin, spacing)])\nextent_pad = tuple([s*(n-1) for s, n in zip(spacing, shape_pad)])", "_____no_output_____" ] ], [ [ "We can plot equally spaced snaps (by `factor`) from the full history saved in `psave` using matplotlib.", "_____no_output_____" ] ], [ [ "# NBVAL_IGNORE_OUTPUT\n\n# Note: flip sense of second dimension to make the plot positive downwards\nplt_extent = [origin_pad[0], origin_pad[0] + extent_pad[0],\n origin_pad[1] + extent_pad[1], origin_pad[1]]\n\n# Plot the wavefields, each normalized to scaled maximum of last time step\nkt = (time_range.num - 2) - 1\namax = 0.05 * np.max(np.abs(psave[kt,:,:]))\nprint(\"amax; %12.6f\" % (amax))\n\nnsnaps = 10\nfactor = round(time_range.num/nsnaps)\n\nfig, axes = plt.subplots(2, 5, figsize=(18, 7), sharex=True)\nfig.suptitle(\"Snapshots\", size=14)\nfor count, ax in enumerate(axes.ravel()):\n snapshot = factor*count\n ax.imshow(np.transpose(psave[snapshot,:,:]), cmap=\"seismic\",\n vmin=-amax, vmax=+amax, extent=plt_extent)\n ax.plot(model.domain_size[0]* .5, model.domain_size[1]* .5, \\\n 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n ax.grid()\n ax.tick_params('both', length=2, width=0.5, which='major',labelsize=10)\n ax.set_title(\"Wavefield at t=%.2fms\" % snapshot,fontsize=12)\nfor ax in axes[1, :]:\n ax.set_xlabel(\"X Coordinate (m)\",fontsize=10)\nfor ax in axes[:, 0]:\n ax.set_ylabel(\"Z Coordinate 
(m)\",fontsize=10)", "amax; 13.318243\n" ] ], [ [ "## References\n\n- **Least-squares reverse time migration in TTI media using a pure qP-wave equation** (2020)\n<br> Xinru Mu, Jianping Huang, Jidong Yang, Xu Guo, and Yundong Guo\n<br> Geophysics, Vol. 85, No. 4\n<br> https://doi.org/10.1190/geo2019-0320.1", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a7afa6c21c9e4ef825de974ea984ac41fc755d5
13,683
ipynb
Jupyter Notebook
Python Absolute Beginner/Module_1_Practice_1_IntroPy.ipynb
gracieforthman/pythonteachingcode
34fdbcd58209566fd40f9fdabdcd00d18625bbce
[ "MIT" ]
1
2021-01-19T17:16:57.000Z
2021-01-19T17:16:57.000Z
Python Absolute Beginner/Module_1_Practice_1_IntroPy.ipynb
gracieforthman/pythonteachingcode
34fdbcd58209566fd40f9fdabdcd00d18625bbce
[ "MIT" ]
null
null
null
Python Absolute Beginner/Module_1_Practice_1_IntroPy.ipynb
gracieforthman/pythonteachingcode
34fdbcd58209566fd40f9fdabdcd00d18625bbce
[ "MIT" ]
1
2021-01-19T17:20:40.000Z
2021-01-19T17:20:40.000Z
32.195294
976
0.498794
[ [ [ "# 1-1 Intro Python Practice\n## Getting started with Python in Jupyter Notebooks\n### notebooks, comments, print(), type(), addition, errors and art\n\n<font size=\"5\" color=\"#00A0B2\" face=\"verdana\"> <B>Student will be able to</B></font>\n- use Python 3 in Jupyter notebooks\n- write working code using `print()` and `#` comments \n- write working code using `type()` and variables\n- combine strings using string addition (+)\n- add numbers in code (+)\n- troubleshoot errors\n- create character art \n\n# &nbsp;\n>**note:** the **[ ]** indicates student has a task to complete \n \n>**reminder:** to run code and save changes: student should upload or clone a copy of notebooks \n\n#### notebook use\n- [ ] insert a **code cell** below \n- [ ] enter the following Python code, including the comment: \n```python \n# [ ] print 'Hello!' and remember to save notebook!\nprint('Hello!')\n```\nThen run the code - the output should be: \n`Hello!`", "_____no_output_____" ], [ "#### run the cell below \n- [ ] use **Ctrl + Enter** \n- [ ] use **Shift + Enter** ", "_____no_output_____" ] ], [ [ "print('watch for the cat')", "_____no_output_____" ] ], [ [ "#### Student's Notebook editing\n- [ ] Edit **this** notebook Markdown cell replacing the word \"Student's\" above with your name\n- [ ] Run the cell to display the formatted text\n- [ ] Run any 'markdown' cells that are in edit mode, so they are easier to read", "_____no_output_____" ], [ "#### [ ] convert \\*this\\* cell from markdown to a code cell, then run it \nprint('Run as a code cell')\n", "_____no_output_____" ], [ "## # comments\ncreate a code comment that identifies this notebook, containing your name and the date", "_____no_output_____" ], [ "#### use print() to \n- [ ] print [**your_name**]\n- [ ] print **is using Python!**", "_____no_output_____" ] ], [ [ "# [ ] print your name\n\n# [ ] print \"is using Python\"\n\n", "_____no_output_____" ] ], [ [ "Output above should be: \n`Your Name \nis using Python!` ", "_____no_output_____" ], [ "#### use variables in print()\n- [ ] create a variable **your_name** and assign it a string containing your name\n- [ ] print **your_name**", "_____no_output_____" ] ], [ [ "# [ ] create a variable your_name and assign it a string containing your name\n\n# [ ] print your_name\n\n", "_____no_output_____" ] ], [ [ "#### create more string variables\n- **[ ]** create variables as directed below\n- **[ ]** print the variables", "_____no_output_____" ] ], [ [ "# [ ] create variables and assign values for: favorite_song, shoe_size, lucky_number\n\n\n# [ ] print the value of each variable favorite_song, shoe_size, and lucky_number\n\n\n", "_____no_output_____" ] ], [ [ "#### use string addition\n- **[ ]** print the above string variables (favorite_song, shoe_size, lucky_number) combined with a description by using **string addition**\n>for example favorite_song displayed as: \n`favorite song is happy birthday`", "_____no_output_____" ] ], [ [ "# [ ] print favorite_song with description\n\n\n# [ ] print shoe_size with description\n\n\n# [ ] print lucky_number with description\n\n", "_____no_output_____" ] ], [ [ "##### more string addition\n- **[ ]** make a single string (sentence) in a variable called favorite_lucky_shoe using **string addition** with favorite_song, shoe_size, lucky_number variables and other strings as needed \n- **[ ]** print the value of the favorite_lucky_shoe variable string\n> sample output: \n`For singing happy birthday 8.5 times, you will be fined $25`\n\n>see the example in the next cell (placeholder values) before writing your own", "_____no_output_____" ] ], [ [ 
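"# example (illustration only - placeholder values, not the exercise answer)\n# note: numbers such as a shoe size must be converted with str() for string addition\nexample_song = 'twinkle twinkle'\nexample_shoe_size = 9.5\nexample_sentence = 'For singing ' + example_song + ' ' + str(example_shoe_size) + ' times, you will be fined $25'\nprint(example_sentence)", "_____no_output_____" ] ], [ [ 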
"# assign favorite_lucky_shoe using\n\n\n", "_____no_output_____" ] ], [ [ "### print() art", "_____no_output_____" ], [ "#### use `print()` and the asterisk **\\*** to create the following shapes\n- [ ] diagonal line \n- [ ] rectangle \n- [ ] smiley face", "_____no_output_____" ] ], [ [ "# [ ] print a diagonal using \"*\"\n\n\n# [ ] rectangle using \"*\"\n\n\n# [ ] smiley using \"*\"\n\n\n", "_____no_output_____" ] ], [ [ "#### Using `type()`\n-**[ ]** calulate the *type* using `type()`", "_____no_output_____" ] ], [ [ "# [ ] display the type of 'your name' (use single quotes)\n\n\n", "_____no_output_____" ], [ "# [ ] display the type of \"save your notebook!\" (use double quotes)\n\n\n", "_____no_output_____" ], [ "# [ ] display the type of \"25\" (use quotes)\n\n\n", "_____no_output_____" ], [ "# [ ] display the type of \"save your notebook \" + 'your name'\n\n\n", "_____no_output_____" ], [ "# [ ] display the type of 25 (no quotes)\n\n\n", "_____no_output_____" ], [ "# [ ] display the type of 25 + 10 \n\n\n", "_____no_output_____" ], [ "# [ ] display the type of 1.55\n\n\n", "_____no_output_____" ], [ "# [ ] display the type of 1.55 + 25\n\n\n", "_____no_output_____" ] ], [ [ "#### Find the type of variables\n- **[ ]** run the cell below to make the variables available to be used in other code\n- **[ ]** display the data type as directed in the cells that follow", "_____no_output_____" ] ], [ [ "# assignments ***RUN THIS CELL*** before starting the section\n\nstudent_name = \"Gus\"\nstudent_age = 16\nstudent_grade = 3.5\nstudent_id = \"ABC-000-000\"\n", "_____no_output_____" ], [ "# [ ] display the current type of the variable student_name\n\n\n", "_____no_output_____" ], [ "# [ ] display the type of student_age\n\n\n", "_____no_output_____" ], [ "# [ ] display the type of student_grade\n\n\n", "_____no_output_____" ], [ "# [ ] display the type of student_age + student_grade\n\n\n", "_____no_output_____" ], [ "# [ ] display the current type of student_id\n\n\n", "_____no_output_____" ], [ "# assign new value to student_id \n\n\n# [ ] display the current of student_id\n\n\n", "_____no_output_____" ] ], [ [ "#### number integer addition\n\n- **[ ]** create variables (x, y, z) with integer values", "_____no_output_____" ] ], [ [ "# [ ] create integer variables (x, y, z) and assign them 1-3 digit integers (no decimals - no quotes)\n\n\n", "_____no_output_____" ] ], [ [ "- **[ ]** insert a **code cell** below\n- **[ ]** create an integer variable named **xyz_sum** equal to the sum of x, y, and z\n- **[ ]** print the value of **xyz_sum** ", "_____no_output_____" ] ], [ [ "\n\n", "_____no_output_____" ] ], [ [ "### Errors\n- **[ ]** troubleshoot and fix the errors below", "_____no_output_____" ] ], [ [ "# [ ] fix the error \n\nprint(\"Hello World!\"\") \n\n\n", "_____no_output_____" ], [ "# [ ] fix the error \nprint(strings have quotes and variables have names)\n\n", "_____no_output_____" ], [ "# [ ] fix the error \nprint( \"I have $\" + 5)\n\n", "_____no_output_____" ], [ "# [ ] fix the error \nprint('always save the notebook\")\n \n", "_____no_output_____" ] ], [ [ "## ASCII art\n- **[ ]** Display first name or initials as ASCII Art\n- **[ ]** Challenge: insert an additional code cell to make an ASCII picture", "_____no_output_____" ] ], [ [ "# [ ] ASCII ART\n\n\n", "_____no_output_____" ], [ "# [ ] ASCII ART\n\n", "_____no_output_____" ] ], [ [ "[Terms of use](http://go.microsoft.com/fwlink/?LinkID=206977) &nbsp; [Privacy & cookies](https://go.microsoft.com/fwlink/?LinkId=521839) &nbsp; © 
2017 Microsoft", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
4a7afeee30b7d7a54f45b086cf3d617ad0f7821b
51,777
ipynb
Jupyter Notebook
FADAPandas.ipynb
jagatabhay/pysparktest
f6ee66cc315a5500efbbbc013ab6536d3220d1a8
[ "MIT" ]
null
null
null
FADAPandas.ipynb
jagatabhay/pysparktest
f6ee66cc315a5500efbbbc013ab6536d3220d1a8
[ "MIT" ]
null
null
null
FADAPandas.ipynb
jagatabhay/pysparktest
f6ee66cc315a5500efbbbc013ab6536d3220d1a8
[ "MIT" ]
null
null
null
31.190964
99
0.358499
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "store=pd.read_csv('store_test.csv')", "_____no_output_____" ], [ "customer=pd.read_csv('customer_test.csv')", "_____no_output_____" ], [ "orders=pd.read_csv('orders_test.csv')", "_____no_output_____" ], [ "store.head()", "_____no_output_____" ], [ "customer.head()", "_____no_output_____" ], [ "orders.head()", "_____no_output_____" ] ], [ [ "### Question 1 Solution", "_____no_output_____" ] ], [ [ "orders_store = pd.merge(orders, store, left_on=\"store_id\", right_on=\"id\")", "_____no_output_____" ], [ "orders_store.head()", "_____no_output_____" ], [ "orders_store['order_date'] = pd.to_datetime(orders_store['order_date'])", "_____no_output_____" ], [ "orders_store['years'] = orders_store['order_date'].dt.year", "_____no_output_____" ], [ "orders_store['month']=orders_store['order_date'].dt.month", "_____no_output_____" ], [ "orders_store.head()", "_____no_output_____" ], [ "k = orders_store.groupby(['years','month','name'])\\\n .agg({'store_id':\"count\",'total':sum})\\\n .reset_index()", "_____no_output_____" ], [ "k.columns = ['years','month','name','TotalOrders','TotalRevenue']", "_____no_output_____" ], [ "k.head(20)", "_____no_output_____" ], [ "k.to_csv('Quest1Soln.csv')", "_____no_output_____" ] ], [ [ "### Question 2 Solution", "_____no_output_____" ] ], [ [ "cust_order = pd.merge(customer, orders, left_on=\"id\", right_on=\"customer_id\")", "_____no_output_____" ], [ "s = cust_order.groupby(['first_name','last_name','email','customer_id'])\\\n .customer_id.count()\\\n .reset_index(name='OrderPlacedByUser')", "_____no_output_____" ], [ "s.loc[s['OrderPlacedByUser']<10,['first_name','last_name','email','OrderPlacedByUser']]", "_____no_output_____" ], [ "s.loc[s['OrderPlacedByUser']<10,['first_name','last_name','email','OrderPlacedByUser']]\\\n .to_csv('Quest2Soultion.csv')", "_____no_output_____" ] ], [ [ "### Question3 Solution", "_____no_output_____" ] ], [ [ "import hashlib", "_____no_output_____" ], [ "df3 = s.loc[s['OrderPlacedByUser']<10,['first_name','last_name','email','OrderPlacedByUser']]", "_____no_output_____" ], [ "df3['email'] = [hashlib.md5(val.encode('UTF-8')).hexdigest() for val in df3['email']]", "_____no_output_____" ], [ "df3.head()", "_____no_output_____" ], [ "df3.to_csv('Quest3Soln.csv')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
4a7afeefffd07b43647c47df815a98f5387344e0
13,283
ipynb
Jupyter Notebook
apps/kg/Untitled.ipynb
mayi140611/mayiutils
5340d7bd4590e2a41afd5d02ffc569745d67c866
[ "Apache-2.0" ]
null
null
null
apps/kg/Untitled.ipynb
mayi140611/mayiutils
5340d7bd4590e2a41afd5d02ffc569745d67c866
[ "Apache-2.0" ]
null
null
null
apps/kg/Untitled.ipynb
mayi140611/mayiutils
5340d7bd4590e2a41afd5d02ffc569745d67c866
[ "Apache-2.0" ]
null
null
null
27.905462
120
0.386283
[ [ [ "#### https://mp.weixin.qq.com/s/APICRoCY2AQKS_F62LaLAg", "_____no_output_____" ] ], [ [ "import tushare as ts\nimport csv\nimport time\nimport pandas as pd\n\npro = ts.pro_api('5fd1639100f8a22b7f86e882e03192009faa72bae1ae93803e1172d5')", "_____no_output_____" ], [ "# 获取stock_basic\nstock_basic = pro.stock_basic(list_status='L', fields='ts_code, symbol, name, industry')\n# 重命名行,便于后面导入neo4j\nbasic_rename = {'ts_code': 'TS代码', 'symbol': '股票代码', 'name': '股票名称', 'industry': '行业'}\nstock_basic.rename(columns=basic_rename, inplace=True)\n# 保存为stock.csv\nstock_basic.to_csv('stock.csv', encoding='gbk')\nstock_basic.head()", "_____no_output_____" ], [ "stock_basic.shape", "_____no_output_____" ], [ "# 获取top10_holders\nholders = pd.DataFrame(columns=('ts_code', 'ann_date', 'end_date', 'holder_name', 'hold_amount', 'hold_ratio'))\n# 获取一年内所有上市股票股东信息(可以获取一个报告期的)\nfor i in range(3610):\n code = stock_basic['TS代码'].values[i]\n top10_holders = pro.top10_holders(ts_code=code, start_date='20180101', end_date='20181231')\n holders = holders.append(top10_holders)\n time.sleep(0.8)# 数据接口限制\n# 保存为holders.csv\nholders.to_csv('holders.csv', encoding='gbk')\nholders.head()", "_____no_output_____" ], [ "# 获取concept,并查看概念分类数量\nconcept = pro.concept()\nconcept.to_csv('concept_num.csv', encoding='gbk')\nconcept.head()", "_____no_output_____" ], [ "# 获取concept_detail\nconcept_details = pd.DataFrame(columns=('id', 'concept_name', 'ts_code', 'name'))\nfor i in range(358):\n id = 'TS' + str(i)\n concept_detail = pro.concept_detail(id=id)\n concept_details = concept_details.append(concept_detail)\n time.sleep(0.6)\n# 保存为concept_detail.csv\nconcept_details.to_csv('concept.csv', encoding='gbk')\nconcept_details.head()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
4a7b0a68d666beb08350359789054d84adecc301
18,205
ipynb
Jupyter Notebook
practicals/numerics/CFL_condition.ipynb
hannahwoodward/geog0121
57a0974a07fa97356004847e59b0f24e659d67e6
[ "MIT" ]
null
null
null
practicals/numerics/CFL_condition.ipynb
hannahwoodward/geog0121
57a0974a07fa97356004847e59b0f24e659d67e6
[ "MIT" ]
null
null
null
practicals/numerics/CFL_condition.ipynb
hannahwoodward/geog0121
57a0974a07fa97356004847e59b0f24e659d67e6
[ "MIT" ]
null
null
null
39.835886
482
0.604614
[ [ [ "# An exercise in discretisation and the CFL criterion\n*These notebooks have been built from Lorena Barba's Computational Fluid Dynamics module. Here we are going to go from a (simple) equation, to a numerical solution of it. We are then going to look at how changing the resolution impacts the speed and validity of the program.*\n\n*Barba, Lorena A., and Forsyth, Gilbert F. (2018). CFD Python: the 12 steps to Navier-Stokes equations. Journal of Open Source Education, 1(9), 21, https://doi.org/10.21105/jose.00021*\n\n\n## Step 1: 1-D Linear Convection ", "_____no_output_____" ], [ "The 1-D Linear Convection equation is the simplest, most basic model that can be used to learn something about CFD. \nHere it is, where $w$ is the vertical veolcity and we're using height, $z$, as the vertical coordinate:\n\n$$\\frac{\\partial w}{\\partial t} + c \\frac{\\partial w}{\\partial z} = 0$$\n\nWith given initial conditions (understood as a wave), the equation represents the propagation of that initial wave with speed $c$, without change of shape. Let the initial condition be $w(z,0)=w_0(z)$. Then the exact solution of the equation is $w(z,t)=w_0(z-ct)$.\n\nWe discretise this equation in both space and time, using the Forward Difference scheme for the time derivative and the Backward Difference scheme for the space derivative. Consider discretising the spatial coordinate $x$ into points that we index from $i=0$ to $N$, and stepping in discrete time intervals of size $\\Delta t$.\n\nFrom the definition of a derivative (and simply removing the limit), we know that:\n\n$$\\frac{\\partial w}{\\partial z}\\approx \\frac{w(z+\\Delta z)-w(z)}{\\Delta z}$$\n\nOur discrete equation, then, is:\n$$\\frac{w_i^{n+1}-w_i^n}{\\Delta t} + c \\frac{w_i^n - w_{i-1}^n}{\\Delta z} = 0 $$\n\nWhere $n$ and $n+1$ are two consecutive steps in time, while $i-1$ and $i$ are two neighboring points of the discretized $z$ coordinate. If there are given initial conditions, then the only unknown in this discretization is $w_i^{n+1}$. \n\nWe can solve for our unknown to get an equation that allows us to advance in time, as follows:\n\n$$w_i^{n+1} = w_i^n - c \\frac{\\Delta t}{\\Delta z}(w_i^n-w_{i-1}^n)$$\n", "_____no_output_____" ], [ "Now let's try implementing this in Python.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport time\nimport matplotlib.pyplot as plt \n%matplotlib inline", "_____no_output_____" ] ], [ [ "First, define a few variables... \n(1) Define an evenly spaced grid of points within a spatial domain that is 2 units of length wide, i.e., $z_i\\in(0,2)$. \n (2) define a variable nz, which will be the number of grid points we want and dz will be the distance between any pair of adjacent grid points.", "_____no_output_____" ] ], [ [ "total_height = 2.0 # height of the model (in m)\ndt = 0.025 # dt is the length of each timestep\nnz = 41 # define the number of grid points \ndz = total_height / (nz-1) # define the distance between any pair of adjacent grid points (delta z)\nnt = 20 #nt is the number of timesteps we want to calculate\nc = 1. #assume wavespeed of c = 1 m/s", "_____no_output_____" ] ], [ [ "Then we need to set up our initial conditions... \nThe initial velocity $w_0$ is given as $w = 2$ in the interval $0.5 \\leq z \\leq 1$ and $w = 1$ everywhere else in $(0,2)$ (i.e., a hat function).", "_____no_output_____" ] ], [ [ "w_0 = np.ones(nz) #numpy function ones() makes an array \nw_0[int(.5 / dz):int(1 / dz + 1)] = 2. 
#setting w_0 = 2 if 0.5<=z<=1, setting w_0=1 elsewhere\nprint(w_0) # it shows us a hat function", "_____no_output_____" ], [ "# Let's take a look at those initial conditions \nplt.plot(w_0, np.linspace(0, total_height, nz))", "_____no_output_____" ] ], [ [ "Now it's time to implement the discretisation of the convection equation using a finite-difference scheme.\nFor every element of our array u, we need to perform the operation $$w_i^{n+1} = w_i^n - c \\frac{\\Delta t}{\\Delta z}(w_i^n-w_{i-1}^n)$$\n\nWe'll store the result in a new (temporary) array `wn`, which will be the solution $z$ for the next time-step. We will repeat this operation for as many time-steps as we specify and then we can see how far the wave has convected.", "_____no_output_____" ], [ "(1) Initialise our placeholder array `wn` to hold the values we calculate for the $n+1$ timestep.\n (2) We have two iterative operations: one in space and one in time (we'll learn differently later), so we'll start by nesting one loop inside the other. Note: when we write: for i in range(1, nz) we will iterate through the w array, but we'll be skipping the first element (the zero-th element).", "_____no_output_____" ] ], [ [ "wn = np.ones(nz) #Set the velocity as the initial conditions at the beginning of the run\nw = w_0.copy()\n\n# In each timestep(20 timesteps in total), iterate through all the grid points...\n#...then repeat the iteration for all the timesteps \n\nfor n in range(0, nt): #loop for values of n from 0 to nt, so it will run nt times\n wn = w.copy() #copy the existing values of w into wn\n for i in range(1, nz): # if starting from the zero-th element, it will crash, due to the value un[i-1] doesn't exist\n w[i] = wn[i] - c * dt / dz * (wn[i] - wn[i-1]) ", "_____no_output_____" ], [ "# Now let's try plotting our u array after advancing in time.\nplt.plot(w_0,np.linspace(0, total_height, nz),label = \"initial conditions\")\nplt.plot(w,np.linspace(0, total_height, nz),label = \"At end of run\")\nplt.legend() ", "_____no_output_____" ] ], [ [ "# Exploring convergence and the CFL criterion", "_____no_output_____" ], [ "Above we used a grid with 41 points (nz = 41) and a timestep is 0.025 seconds (dt = 0.025). You can see that the \"hat\" function has not just been pushed upwards (as the analytical solution of the equation suggests should happen). It has also been smoothed out a bit, because of a process called [\"numerical diffusion\"](https://en.wikipedia.org/wiki/Numerical_diffusion). This is where the discretisation we used introduces a spurious spreading out of the single pulse. \n\nThe amount of numerical diffusion will depend on the coarseness of our grid. So now, we'll going to experiment with increasing the size of our grid to get a more accurate solution. 
", "_____no_output_____" ], [ "We can do it by defining a new function, so that we can easily examine what happens as we adjust just one variable: the grid size (nz)", "_____no_output_____" ] ], [ [ "# define a function called 'linearconv()', it allow us to change the number of grid points in over a 2m layer \n\ndef linearconv(nz):\n dz = 2 / (nz - 1) #dz is the distance between any pair of adjacent grid points\n nt = 20 #nt is the number of timesteps we want to calculate\n dt = .025 #dt is the amount of time each timestep covers \n c = 1\n\n w = np.ones(nz) #defining a numpy array which is nx elements long with every value equal to 1.\n w[int(.5/dz):int(1 / dz + 1)] = 2 #setting w = 2 if 0.5<=z<=1, setting w=1 if 0<z<0.5 or 1<z<2\n w_0=w.copy()\n \n wn = np.ones(nz) #initializing our placeholder array, zn, to hold the values we calculate for the n+1 timestep\n\n for n in range(0, nt): #iterate through time\n wn = w.copy() #copy the existing values of w into wn\n for i in range(1, nz):\n w[i] = wn[i] - c * dt / dz * (wn[i] - wn[i-1]) # using 1-D linear convection equation\n \n plt.plot(w_0,np.linspace(0, 2, nz),label = \"initial conditions\")\n plt.plot(w,np.linspace(0, 2, nz),label = \"At end of run\")\n plt.legend()\n", "_____no_output_____" ] ], [ [ "Now let's examine the results of our linear convection problem with an increasingly fine mesh", "_____no_output_____" ] ], [ [ "# Now reproduce the plot above for reference:\n\nlinearconv(41) #convection using 41 grid points", "_____no_output_____" ], [ "# Increase the number of grid points\n# still numerical diffusion present, but it is less severe (curve less smooth).\n\nlinearconv(61)", "_____no_output_____" ], [ "# the same pattern is present -- the wave is more square than in the previous runs\n\nlinearconv(71)", "_____no_output_____" ], [ "#completely changed to square curves\n\nlinearconv(81)", "_____no_output_____" ], [ "linearconv(85)\n\n# This doesn't look anything like our original hat function.", "_____no_output_____" ] ], [ [ "Why does this happen?\n\nIn each iteration of our time loop, we use the existing data about our wave to estimate the speed of the wave in the subsequent time step. Initially, the increase in the number of grid points returned more accurate answers. There was less numerical diffusion and the square wave looked much more like a square wave than it did in our first example.\n\nEach iteration of our time loop covers a time-step of length $\\Delta t$, which we have been defining as 0.025.\nDuring this iteration, we evaluate the speed of the wave at each of the $z$ points we've created. In the last plot, something has clearly gone wrong.\n\nWhat has happened is that over the time period $\\Delta t$, the wave is travelling a distance which is greater than dz. \n\nThe length dz of each grid box is related to the number of total points nz, so stability can be enforced if the $\\Delta t$ step size is calculated with respect to the size of $dz$.\n\n$$\\sigma = \\frac{c \\Delta t}{\\Delta z} \\leq \\sigma_{\\max}$$\n\nwhere $c$ is the speed of the wave; $\\sigma$ is called the Courant number and the value of $\\sigma_{\\max}$ that will ensure stability depends on the kind of discretisation used. Overall this equation is called the CFL criterion. 
We will use to calculate the appropriate time-step $dt$ depending on the vertical resolution.\n", "_____no_output_____" ] ], [ [ "# Re-define the function 'linearconv()' as 'linearconv_CFL(nz)' but make the timestep change dynamically with the grid resolution\n\ndef linearconv_CFL(nz):\n\n dz = 2 / (nz - 1) #dz is the distance between two adjacent grid points\n run_length = 0.5 # which is the same as before - i.e. 20*0.025\n c = 1\n sigma = .5 # sigma is a Courant number \n \n dt = sigma * dz # now, the amount of time that each timestep covers, is calculated with respect to the size of dz...\n # ...so, stability is enforced (the value of dt now depends on dz)\n nt = int(1 + run_length / dt)\n \n w = np.ones(nz) #defining a numpy array which is nx elements long with every value equal to 1.\n w[int(.5/dz):int(1 / dz + 1)] = 2 #setting w = 2 if 0.5<=z<=1, setting w=1 if 0<z<0.5 or 1<z<2\n w_0=w.copy()\n\n wn = np.ones(nz)\n\n tic = time.perf_counter() # store the time at the beginning of the loop\n for n in range(nt): #iterate through timestep\n wn = w.copy() \n for i in range(1, nz):\n w[i] = wn[i] - c * dt / dz * (wn[i] - wn[i-1]) \n \n toc = time.perf_counter() # store the time at the end of the loop\n time_taken_millisec=(toc-tic)*10e6\n print(f\"The model took {time_taken_millisec:0.4f} milliseconds to run\")\n\n plt.plot(w_0,np.linspace(0, 2, nz),label = \"initial conditions\")\n plt.plot(w,np.linspace(0, 2, nz),label = \"At end of run\")\n plt.legend()\n\n return(time_taken_millisec) # return the wallclock time for the model to complete\n ", "_____no_output_____" ], [ "runtime_nz41=linearconv_CFL(41)", "_____no_output_____" ], [ "runtime_nz61=linearconv_CFL(61)", "_____no_output_____" ], [ "runtime_nz81=linearconv_CFL(81) \n\n# Compare to linearconv (41), the number of grid points (nx) doubled (from 41 to 81)... \n# ...which means you have changed to a higher resolution\n\n# The distance between any pair of adjacent grid points (dx) has decreased 1/2 (from 0.05 to 0.025)\n\n# Then, the amount of time each timestep covers (dt) will be changed as well...\n# ...it depends on dx and also controlled by the value of sigma (in order to enfore stability)...\n# ...so, in this example, dt has decresed 1/2 (from 0.025sec to 0.0125 sec)\n\n\n# After changing all the variables (nx,dx,dt), iterate through all the grid points in the first timestep...\n# ...then do the same iteration for the second timestep....\n# ...until complete all the timesteps", "_____no_output_____" ], [ "runtime_nz101=linearconv_CFL(101)", "_____no_output_____" ], [ "runtime_nz121=linearconv_CFL(121)", "_____no_output_____" ] ], [ [ "### Summary \n\nLooking all the plots above, you can see that as the number of grid points ($nz$) increases, the convected wave is resolved more accurately (i.e. it becomes more square).\n\nHowever there is a serious downside to the increasing the resolution - it takes much longer to compute. As the numbers of vertical grid points is increased, it intuitively makes sense that the model will need more computations. For example, as $nz$ increases from 41 to 121 we have tripled the number of gridpoints. So does the time taken for the computation increase by a factor of 3 as well? Let's find out...", "_____no_output_____" ] ], [ [ "factor=runtime_nz121/runtime_nz41\nprint(factor)", "_____no_output_____" ] ], [ [ "No, it isn't just a tripling. And the reason for this is again down the CFL criterion.\n\nWhen I ran the code, I got a factor of around 10. 
The actual value you find will depend on what machine you're using to run this notebook and what else is happening on the machine at the time. \n\nAs we reduced distance between grid points by 1/3, we also needed to reduce the timestep by a similar factor. Therefore the amount of computations has gone up by $3^2$. And that's a theoretical baseline, more computations can run into more inefficiencies and make the run length even longer.\n\nFinally, remember that this is a simple example in 1D. A real climate model is 3D meaning that if you increase the grid resolution by a factor of 3, your number of computations would go up by $3^4$. So you would expect the run to take 81 times as long! ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a7b1353c55e9dbb0f28b555eb3e54e0c6815af5
1,295
ipynb
Jupyter Notebook
Python3/Anaconda-Jupyter/Python181103-035.ipynb
UncleLincoln/trainee
eb9f4be00e80fddd0ab3d3e6ea9a20c55f5bcab8
[ "MIT" ]
36
2018-11-03T01:37:30.000Z
2019-04-07T19:52:34.000Z
Python3/Anaconda-Jupyter/Python181103-035.ipynb
UncleLincoln/trainee
eb9f4be00e80fddd0ab3d3e6ea9a20c55f5bcab8
[ "MIT" ]
8
2020-11-13T19:06:32.000Z
2022-01-13T03:24:20.000Z
Python3/Anaconda-Jupyter/Python181103-035.ipynb
BuErTech/trainee
eb9f4be00e80fddd0ab3d3e6ea9a20c55f5bcab8
[ "MIT" ]
86
2018-11-03T01:38:25.000Z
2019-04-07T05:55:02.000Z
19.328358
59
0.450193
[ [ [ "# Python3 练习实例35\n**题目:**文本颜色设置。\n\n**程序分析:**无。\n## 实例", "_____no_output_____" ] ], [ [ "#!/usr/bin/python\n# -*- coding: UTF-8 -*- \n\nclass bcolors: \n HEADER = '\\033[95m' \n OKBLUE = '\\033[94m' \n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m' \n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m' \n\nprint (bcolors.WARNING + \"警告的颜色字体?\" + bcolors.ENDC)", "\u001b[93m警告的颜色字体?\u001b[0m\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code" ] ]
4a7b282c5c3e6e17fefa0cf7a44c42952227e864
3,333
ipynb
Jupyter Notebook
models/bilinear_test.ipynb
adriancampos/road-extraction
3eaf4ed010d71475276d99d4841d67990a967a1b
[ "MIT" ]
1
2019-07-12T20:17:24.000Z
2019-07-12T20:17:24.000Z
models/bilinear_test.ipynb
adriancampos/road-extraction
3eaf4ed010d71475276d99d4841d67990a967a1b
[ "MIT" ]
null
null
null
models/bilinear_test.ipynb
adriancampos/road-extraction
3eaf4ed010d71475276d99d4841d67990a967a1b
[ "MIT" ]
null
null
null
29.236842
511
0.562256
[ [ [ "import torch\nimport torch.nn as nn", "_____no_output_____" ], [ "def deconv(in_size, kernel_size=3,stride=2):\n # Input Size * Stride + max(Filter Size - Stride, 0)\n return in_size * stride + max(kernel_size - stride, 0)", "_____no_output_____" ], [ "\n\n\nn = 200\n\nA = torch.randn(3,n,n)\n\nprint(deconv(n))\n\nprint(nn.Upsample(scale_factor=2,mode='bilinear')(A.unsqueeze(0)).shape)\n\nprint(nn.ConvTranspose2d(in_channels=3, out_channels=3,kernel_size=3, stride=2, padding=0, bias=True)(A.unsqueeze(0)).shape)\n", "401\ntorch.Size([1, 3, 400, 400])\ntorch.Size([1, 3, 401, 401])\n" ], [ "debug_batch_size=None\nprint(4 debug_batch_size)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
4a7b2931d5d1dad4e314514907692e5f60272cba
754,379
ipynb
Jupyter Notebook
.ipynb_checkpoints/clust-checkpoint.ipynb
lrnzgiusti/Extreme-Rare-Event-Detector
6bf55ad0113aae76e1ed6b2aaeb8362b5cc55304
[ "MIT" ]
null
null
null
.ipynb_checkpoints/clust-checkpoint.ipynb
lrnzgiusti/Extreme-Rare-Event-Detector
6bf55ad0113aae76e1ed6b2aaeb8362b5cc55304
[ "MIT" ]
null
null
null
.ipynb_checkpoints/clust-checkpoint.ipynb
lrnzgiusti/Extreme-Rare-Event-Detector
6bf55ad0113aae76e1ed6b2aaeb8362b5cc55304
[ "MIT" ]
null
null
null
44.064194
39,572
0.565199
[ [ [ "import plotly.express as px\nimport pandas as pd\nimport plotly.graph_objects as go\nimport pickle\nfrom plotly.subplots import make_subplots\nimport numpy as np\nimport os\nimport Loader\n\nfrom scipy.spatial import ConvexHull, distance_matrix", "_____no_output_____" ], [ "loader = Loader.Loader(r\"C:\\Users\\logiusti\\Lorenzo\\Data\\ups\")", "('load_commission_dates', 0.02) Args: ('C:\\\\Users\\\\logiusti\\\\Lorenzo\\\\Data\\\\Grapheable\\\\commission_date_df.csv',)\n('load_temperatures', 0.11) Args: ()\n('retrive_ups_name_list', 0.0) Args: ()\n('get_all_eta', 0.02) Args: ()\n('count_clicks', 0.0) Args: ()\n" ], [ "def remap(x):\n \"\"\"\n parametrizzare\n \"\"\"\n return max(0,x-3.5)\n\ndef get_one_pof(p0, p, eta, clicks):\n \"\"\"\n parametrizzare\n \"\"\"\n distance = 1-(1/(1+np.linalg.norm(p0-p, 1)))\n pof_eta_load = remap(np.sqrt(eta**.5 + clicks**.5))\n pof = distance*pof_eta_load**.5\n return pof\n\ndef get_p0_name(df):\n # test points\n pts = df[[2, 3, 4]].to_numpy()\n\n # two points which are fruthest apart will occur as vertices of the convex hull\n candidates = pts[ConvexHull(pts).vertices]\n\n # get distances between each pair of candidate points\n dist_mat = distance_matrix(candidates, candidates)\n\n # get indices of candidates that are furthest apart\n i, j = np.unravel_index(dist_mat.argmax(), dist_mat.shape)\n\n #get the data into the df according to the most distance points\n tmp_df = df[(df[[2, 3, 4]].to_numpy() == candidates[j]) |\n (df[[2, 3, 4]].to_numpy() == candidates[i])]\n\n #return the one who has lower clicks and lower age\n return tmp_df.assign(f=tmp_df['eta']**2 * tmp_df['clicks']**2)\\\n .sort_values('f')\\\n .drop('f', axis=1)\\\n .iloc[0]['UPS']\n\ndef get_all_pofs(df):\n v = []\n p0 = df.loc[df['UPS'] == get_p0_name(df)][[2, 3, 4]].to_numpy()\n for _, row in df.iterrows():\n p = np.array([row[2], row[3], row[4]])\n v.append(get_one_pof(p0, p, row['eta'], row['clicks']))\n return pd.Series(v)", "_____no_output_____" ], [ "def load_df(path): \n\n if os.path.isfile(r\"\"+path):\n with open(r\"\"+path, \"rb\") as input_file:\n df = pickle.load(input_file)\n\n ups_to_clicls = pd.DataFrame(list(loader.count_clicks().items()), columns=['UPS', 'clicks'])\n df = df.merge(ups_to_clicls, how='inner', on = 'UPS')\n\n columns = df.columns.tolist()\n columns = columns[:2] + [columns[-1]] + columns[2:5]#-1] ##this -1 is the desired level of triming\n \n df['pof'] = get_all_pofs(df)\n \n thermal_runaways = df.loc[df['UPS'] == \"EBS2C06_SLASH_BL1\"]\n thermal_runaways = thermal_runaways.append(df.loc[df['UPS'] == \"ESS328_SLASH_5E\"])\n thermal_runaways = thermal_runaways.append(df.loc[df['UPS'] == \"ESS329_SLASH_7E\"])\n\n \n return (thermal_runaways, df)\n\ndef make_plot(path, title, use_out=True):\n fig = make_subplots(\n rows=1, cols=2,\n specs=[[{'type': 'scatter3d'}, {'type': 'scatter3d'}]]\n )\n \n \n \n thermal_runaways, df = load_df(path)\n \n fig.add_scatter3d(x=df[2], y = df[3], z = df[4], marker=dict(color=df['eta'], colorscale='Tealrose'), \n hovertext=df['UPS'] + \"_\" + df['eta'].map(str) + \"_\" + df['clicks'].map(str), \n showlegend=False, name=\"\", mode='markers', row=1,col=1, )\n\n fig.add_scatter3d(x=thermal_runaways[2], y = thermal_runaways[3], z = thermal_runaways[4], \n marker=dict(color='rgb(255,0,0)'), \n hovertext=thermal_runaways['UPS'] + \"_\" + \n thermal_runaways['eta'].map(str) + \"_\" + \n thermal_runaways['clicks'].map(str), \n showlegend=False, name=\"\", mode='markers', row=1,col=1)\n\n\n fig.add_scatter3d(x=df[2], y = df[3], z 
= df[4], marker=dict(color=df['pof'], colorscale='Tealrose'), \n hovertext=df['UPS'] + \"_\" + df['pof'].map(str), hoverlabel=dict(bgcolor=px.colors.diverging.Tealrose) ,\n showlegend=False, name=\"\", mode='markers', row=1,col=2)\n\n fig.add_scatter3d(x=thermal_runaways[2], y = thermal_runaways[3], z = thermal_runaways[4], marker=dict(color='rgb(255,0,0)'), \n hovertext=thermal_runaways['UPS'] + \"_\" + thermal_runaways['pof'].map(str), \n showlegend=False, name=\"\", mode='markers', row=1,col=2)\n\n fig.update_layout(title_text=title)\n fig.show()", "_____no_output_____" ], [ "make_plot(r\"C:\\Users\\logiusti\\Lorenzo\\PyWorkspace\\scripts\\Wrapper\\data\\filtered_dT.pickle\", 'Grad')", "('count_clicks', 0.0) Args: ()\n" ], [ "make_plot(r\"C:\\Users\\logiusti\\Lorenzo\\PyWorkspace\\scripts\\Wrapper\\data\\filtered_energy_of_dTemperature.pickle\", 'E')", "('count_clicks', 0.0) Args: ()\n" ], [ "make_plot(r\"C:\\Users\\logiusti\\Lorenzo\\PyWorkspace\\scripts\\Wrapper\\data\\filtered_signed_total_variation.pickle\", 'STV')", "('count_clicks', 0.0) Args: ()\n" ], [ "make_plot(r\"C:/Users/logiusti/Lorenzo/PyWorkspace/scripts/Wrapper/data/filtered_dEnergy.pickle\", 'dE')", "('count_clicks', 0.0) Args: ()\n" ], [ "make_plot(r\"C:/Users/logiusti/Lorenzo/PyWorkspace/scripts/Wrapper/data/filtered_dSTV.pickle\", 'dSTV')", "('count_clicks', 0.0) Args: ()\n" ], [ "th, df = load_df(r\"C:/Users/logiusti/Lorenzo/PyWorkspace/scripts/Wrapper/data/filtered_dEnergy.pickle\")", "('count_clicks', 0.0) Args: ()\n" ], [ "df['zeta'] = 0.75*df['eta']**.5 + 0.6*df['clicks']**.5\nth['zeta'] = 0.75*th['eta']**.5 + 0.6*th['clicks']**.5\nfig = go.Figure()\n\n\nfig.add_trace(go.Scatter(x=df['zeta'], y=df['pof'],hovertext=df['UPS'] + \"_\" + df['eta'].map(str) + \"_\" + df['clicks'].map(str), \n mode='markers', \n name=r'$\\frac{\\partial T}{\\partial t}$'))\n\n\nfig.add_trace(go.Scatter(x=th['zeta'], y=th['pof'],hovertext=th['UPS'] + \"_\" + th['eta'].map(str) + \"_\" + th['clicks'].map(str) ,marker=dict(color='rgb(255,0,0)'), \n mode='markers', \n name=r'$\\frac{\\partial T}{\\partial t}$'))\n\nfig.show()", "_____no_output_____" ], [ "dE = set(df.loc[df['pof'] >= .75]['UPS'])", "_____no_output_____" ], [ "E = {'EAS11_SLASH_8H',\n 'EAS1_SLASH_8H',\n 'EAS212_SLASH_MS1',\n 'EBS11_SLASH_15',\n 'EBS11_SLASH_25',\n 'EBS11_SLASH_28',\n 'EBS11_SLASH_33',\n 'EBS11_SLASH_45',\n 'EBS11_SLASH_63',\n 'EBS11_SLASH_65',\n 'EBS11_SLASH_67',\n 'EBS131_STAR_60',\n 'EBS2C06_SLASH_BL1',\n 'EBS2Z06_SLASH_BL3',\n 'EBS31_SLASH_83',\n 'ESS02_SLASH_15A',\n 'ESS103_SLASH_1R',\n 'ESS103_SLASH_2R',\n 'ESS103_SLASH_3R',\n 'ESS103_SLASH_4R',\n 'ESS103_SLASH_5E',\n 'ESS103_SLASH_6R',\n 'ESS103_SLASH_7R',\n 'ESS103_SLASH_8R',\n 'ESS11_SLASH_5H',\n 'ESS11_SLASH_P18',\n 'ESS11_STAR_59',\n 'ESS1_SLASH_5H',\n 'ESS21_SLASH_65',\n 'ESS21_SLASH_83',\n 'ESS2_SLASH_Y83',\n 'ESS316_SLASH_7E',\n 'ESS328_SLASH_5E',\n 'ESS329_SLASH_7E',\n 'ESS331_SLASH_5E',\n 'ESS3_SLASH_Y83',\n 'ESS406_SLASH_E91',\n 'ESS407_SLASH_E91'}", "_____no_output_____" ], [ "E.difference(dE)", "_____no_output_____" ], [ "th, df = load_df(r\"C:/Users/logiusti/Lorenzo/PyWorkspace/scripts/Wrapper/data/filtered_signed_total_variation.pickle\")", "('count_clicks', 0.0) Args: ()\n" ], [ "df['zeta'] = 0.75*df['eta']**.5 + 0.6*df['clicks']**.5\nth['zeta'] = 0.75*th['eta']**.5 + 0.6*th['clicks']**.5\nfig = go.Figure()\n\n\nfig.add_trace(go.Scatter(x=df['zeta'], y=df['pof'],hovertext=df['UPS'] + \"_\" + df['eta'].map(str) + \"_\" + df['clicks'].map(str), \n mode='markers', \n 
name=r'$\\frac{\\partial T}{\\partial t}$'))\n\n\nfig.add_trace(go.Scatter(x=th['zeta'], y=th['pof'],hovertext=th['UPS'] + \"_\" + th['eta'].map(str) + \"_\" + th['clicks'].map(str) ,marker=dict(color='rgb(255,0,0)'), \n mode='markers', \n name=r'$\\frac{\\partial T}{\\partial t}$'))\n\nfig.show()", "_____no_output_____" ], [ "STV= set(df.loc[df['pof'] >= .75]['UPS'])", "_____no_output_____" ], [ "STV", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a7b3b8ecfc76061192712867f65f54447413f4e
12,896
ipynb
Jupyter Notebook
docs/notebooks/atomic/windows/lateral_movement/SDWIN-190518213907.ipynb
onesorzer0es/Security-Datasets
6a0eec7d9a2ec6026c6ba239ad647c4f59d2a6ef
[ "MIT" ]
294
2020-08-27T01:41:47.000Z
2021-06-28T00:17:15.000Z
docs/notebooks/atomic/windows/lateral_movement/SDWIN-190518213907.ipynb
onesorzer0es/Security-Datasets
6a0eec7d9a2ec6026c6ba239ad647c4f59d2a6ef
[ "MIT" ]
18
2020-09-01T14:51:13.000Z
2021-06-22T14:12:04.000Z
docs/notebooks/atomic/windows/lateral_movement/SDWIN-190518213907.ipynb
onesorzer0es/Security-Datasets
6a0eec7d9a2ec6026c6ba239ad647c4f59d2a6ef
[ "MIT" ]
48
2020-08-31T07:30:05.000Z
2021-06-28T00:17:37.000Z
42.143791
316
0.489764
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
4a7b3e2ccfa0465c7b97c3508bbd910ebc0c48a3
13,545
ipynb
Jupyter Notebook
weeks/07-objectives.ipynb
jmduarte/capstone-particle-physics-domain
5b9f68865fbd32b87e5a29b911aba204ec1b4e12
[ "Apache-2.0" ]
4
2021-03-04T11:26:52.000Z
2021-10-08T03:26:26.000Z
weeks/07-objectives.ipynb
jmduarte/capstone-particle-physics-domain
5b9f68865fbd32b87e5a29b911aba204ec1b4e12
[ "Apache-2.0" ]
2
2021-09-09T21:48:38.000Z
2021-09-10T03:27:00.000Z
weeks/07-objectives.ipynb
jmduarte/capstone-particle-physics-domain
5b9f68865fbd32b87e5a29b911aba204ec1b4e12
[ "Apache-2.0" ]
12
2020-09-15T12:23:21.000Z
2022-03-31T03:42:55.000Z
37.109589
220
0.569435
[ [ [ "Week 7 Notebook: Optimizing Other Objectives\n===============================================================\n\nThis week, we will look at optimizing multiple objectives simultaneously. In particular, we will look at pivoting with adversarial neural networks {cite:p}`Louppe:2016ylz,ganin2014unsupervised,Sirunyan:2019nfw`.\n\nWe will borrow the implementation from: <https://github.com/glouppe/paper-learning-to-pivot>", "_____no_output_____" ] ], [ [ "import tensorflow.keras as keras\nimport numpy as np\nfrom sklearn.metrics import roc_curve, auc\nimport matplotlib.pyplot as plt\nimport uproot\nfrom tqdm.notebook import tqdm", "_____no_output_____" ], [ "import yaml\n\nwith open('definitions.yml') as file:\n # The FullLoader parameter handles the conversion from YAML\n # scalar values to Python the dictionary format\n definitions = yaml.load(file, Loader=yaml.FullLoader)\n \nfeatures = definitions['features']\nspectators = definitions['spectators']\nlabels = definitions['labels']\n\nnfeatures = definitions['nfeatures']\nnspectators = definitions['nspectators']\nnlabels = definitions['nlabels']\nntracks = definitions['ntracks']", "_____no_output_____" ] ], [ [ "## Define discriminator, regression, and combined adversarial models\nThe combined loss function is $$L = L_\\mathrm{class} - \\lambda L_\\mathrm{reg}$$\n\n- $L_\\mathrm{class}$ is the loss function for the classification part (categorical cross entropy)\n- $L_\\mathrm{reg}$ is the loss function for the adversarial part (in this case a regression)\n- $\\lambda$ is a hyperparamter that controls how important the adversarial part of the loss is compared to the classification part, which we nominally set to 1", "_____no_output_____" ] ], [ [ "from tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, Dense, BatchNormalization, Concatenate, GlobalAveragePooling1D\nimport tensorflow.keras.backend as K\n\n# define Deep Sets model with Dense Keras layer\ninputs = Input(shape=(ntracks, nfeatures,), name='input') \nx = BatchNormalization(name='bn_1')(inputs)\nx = Dense(64, name='dense_1', activation='relu')(x)\nx = Dense(32, name='dense_2', activation='relu')(x)\nx = Dense(32, name='dense_3', activation='relu')(x)\n# sum over tracks\nx = GlobalAveragePooling1D(name='pool_1')(x)\nx = Dense(100, name='dense_4', activation='relu')(x)\noutput = Dense(nlabels, name = 'output', activation='softmax')(x)\n \nkeras_model_disc = Model(inputs=inputs, outputs=output)\nkeras_model_disc.compile(optimizer='adam',\n loss='categorical_crossentropy')\n\n# regressor\nx = Dense(100, name='dense_5', activation='relu')(keras_model_disc(inputs))\nx = Dense(100, name='dense_6', activation='relu')(x)\noutput_reg = Dense(2, activation='linear', name='mass_pt_reg')(x)\n \n\nsgd_opt = keras.optimizers.SGD(momentum=0)\nkeras_model_reg = Model(inputs=inputs, outputs=output_reg)\nkeras_model_reg.compile(optimizer=sgd_opt,\n loss='mse')\n\n# combined model\nlam = 1\nkeras_model_adv = Model(inputs=inputs, outputs=[keras_model_disc(inputs), keras_model_reg(inputs)])\nkeras_model_adv.compile(optimizer=sgd_opt, \n loss=['categorical_crossentropy', 'mse'],\n loss_weights = [1, -lam]) \n\nprint(keras_model_disc.summary())\nprint(keras_model_reg.summary())\nprint(keras_model_adv.summary())", "_____no_output_____" ] ], [ [ "## Load data", "_____no_output_____" ] ], [ [ "from DataGenerator import DataGenerator\n# load training and validation generators \ntrain_files = 
['root://eospublic.cern.ch//eos/opendata/cms/datascience/HiggsToBBNtupleProducerTool/HiggsToBBNTuple_HiggsToBB_QCD_RunII_13TeV_MC/train/ntuple_merged_10.root']\nval_files = ['root://eospublic.cern.ch//eos/opendata/cms/datascience/HiggsToBBNtupleProducerTool/HiggsToBBNTuple_HiggsToBB_QCD_RunII_13TeV_MC/train/ntuple_merged_11.root']\n\n\ntrain_generator = DataGenerator(train_files, features, labels, spectators, batch_size=1024, n_dim=ntracks, \n remove_mass_pt_window=False, \n remove_unlabeled=True, max_entry=5000,\n return_spectators=True, scale_mass_pt=[100., 10000.])\n\nval_generator = DataGenerator(val_files, features, labels, spectators, batch_size=1024, n_dim=ntracks, \n remove_mass_pt_window=False, \n remove_unlabeled=True, max_entry=5000, \n return_spectators=True, scale_mass_pt=[100., 10000.])", "_____no_output_____" ] ], [ [ "## Pretrain discriminator and regressor models", "_____no_output_____" ] ], [ [ "# pretrain discriminator\nkeras_model_disc.trainable = True\nkeras_model_disc.compile(optimizer='adam',\n loss='categorical_crossentropy')\nfor n_epoch in tqdm(range(20)):\n for t in tqdm(train_generator, total=len(train_generator), leave=bool(n_epoch==19)):\n keras_model_disc.fit(t[0], t[1][0],verbose=0) \n \n# pretrain regressor\nkeras_model_reg.trainable = True\nkeras_model_disc.trainable = False\nkeras_model_reg.compile(optimizer=sgd_opt, loss='mse')\nfor n_epoch in tqdm(range(20)):\n for t in tqdm(train_generator, total=len(train_generator), leave=bool(n_epoch==19)):\n keras_model_reg.fit(t[0], t[1][1], verbose=0) ", "_____no_output_____" ] ], [ [ "## Main training loop\n\nDuring the main training loop, we do two things:\n1. Train the discriminator model with the combined loss function $$L = L_\\mathrm{class} - \\lambda L_\\mathrm{reg}$$\n1. 
Train the regression model to learn the mass from with the standard MSE loss function $$L_\\mathrm{reg}$$", "_____no_output_____" ] ], [ [ "# alternate training discriminator and regressor \nfor n_epoch in tqdm(range(40)):\n for t in tqdm(train_generator, total=len(train_generator), leave=bool(n_epoch==39)):\n # train discriminator\n keras_model_reg.trainable = False\n keras_model_disc.trainable = True\n keras_model_adv.compile(optimizer=sgd_opt, \n loss=['categorical_crossentropy', 'mse'],\n loss_weights=[1, -lam]) \n keras_model_adv.fit(t[0], t[1], verbose=0)\n\n # train regressor\n keras_model_reg.trainable = True\n keras_model_disc.trainable = False\n keras_model_reg.compile(optimizer=sgd_opt, loss='mse')\n keras_model_reg.fit(t[0], t[1][1],verbose=0)\nkeras_model_adv.save_weights('keras_model_adv_best.h5')", "_____no_output_____" ] ], [ [ "## Test", "_____no_output_____" ] ], [ [ "# load testing file\ntest_files = ['root://eospublic.cern.ch//eos/opendata/cms/datascience/HiggsToBBNtupleProducerTool/HiggsToBBNTuple_HiggsToBB_QCD_RunII_13TeV_MC/test/ntuple_merged_0.root']\ntest_generator = DataGenerator(test_files, features, labels, spectators, batch_size=8192, n_dim=ntracks, \n remove_mass_pt_window=True, \n remove_unlabeled=True,\n return_spectators=True,\n max_entry=200000) # basically, no maximum", "_____no_output_____" ], [ "# run model inference on test data set\npredict_array_adv = []\nlabel_array_test = []\nspec_array_test = []\n\nfor t in tqdm(test_generator, total=len(test_generator)):\n label_array_test.append(t[1][0])\n spec_array_test.append(t[1][1])\n predict_array_adv.append(keras_model_adv.predict(t[0])[0])\npredict_array_adv = np.concatenate(predict_array_adv, axis=0)\nlabel_array_test = np.concatenate(label_array_test, axis=0)\nspec_array_test = np.concatenate(spec_array_test, axis=0)", "_____no_output_____" ], [ "# create ROC curves\nprint(label_array_test.shape)\nprint(spec_array_test.shape)\nprint(predict_array_adv.shape)\nfpr_adv, tpr_adv, threshold_adv = roc_curve(label_array_test[:,1], predict_array_adv[:,1])\n \n# plot ROC curves\nplt.figure()\nplt.plot(tpr_adv, fpr_adv, lw=2.5, label=\"Adversarial, AUC = {:.1f}%\".format(auc(fpr_adv,tpr_adv)*100))\nplt.xlabel(r'True positive rate')\nplt.ylabel(r'False positive rate')\nplt.semilogy()\nplt.ylim(0.001, 1)\nplt.xlim(0, 1)\nplt.grid(True)\nplt.legend(loc='upper left')\nplt.show()", "_____no_output_____" ], [ "from utils import find_nearest", "_____no_output_____" ], [ "plt.figure()\nfor wp in [1.0, 0.5, 0.3, 0.1, 0.05]:\n idx, val = find_nearest(fpr_adv, wp)\n plt.hist(spec_array_test[:,0], bins=np.linspace(40, 200, 21), \n weights=label_array_test[:,0]*(predict_array_adv[:,1] > threshold_adv[idx]),\n alpha=0.4, density=True, label='QCD, {}% FPR cut'.format(int(wp*100)),linestyle='-')\nplt.legend()\nplt.xlabel(r'$m_{SD}$')\nplt.ylabel(r'Normalized probability')\nplt.xlim(40, 200)\n\nplt.figure()\nfor wp in [1.0, 0.5, 0.3, 0.1, 0.05]:\n idx, val = find_nearest(fpr_adv, wp)\n plt.hist(spec_array_test[:,0], bins=np.linspace(40, 200, 21), \n weights=label_array_test[:,1]*(predict_array_adv[:,1] > threshold_adv[idx]),\n alpha=0.4, density=True, label='H(bb), {}% FPR cut'.format(int(wp*100)),linestyle='-')\nplt.legend()\nplt.xlabel(r'$m_{SD}$')\nplt.ylabel(r'Normalized probability')\nplt.xlim(40, 200)\nplt.show()\n\nplt.figure()\nplt.hist(predict_array_adv[:,1], bins = np.linspace(0, 1, 21), \n weights=label_array_test[:,1]*0.1,\n alpha=0.4, linestyle='-', label='H(bb)')\nplt.hist(predict_array_adv[:,1], bins = 
np.linspace(0, 1, 21), \n weights=label_array_test[:,0],\n alpha=0.4, linestyle='-', label='QCD')\nplt.legend()\nplt.show()\n\n\nplt.figure()\nplt.hist(spec_array_test[:,0], bins = np.linspace(40, 200, 21), \n weights = label_array_test[:,1]*0.1,\n alpha=0.4, linestyle='-', label='H(bb)')\nplt.hist(spec_array_test[:,0], bins = np.linspace(40, 200, 21), \n weights = label_array_test[:,0],\n alpha=0.4, linestyle='-', label='QCD')\nplt.legend()\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
4a7b4977473f7feef2749d2f9cd0e8eff7a87cd8
126,039
ipynb
Jupyter Notebook
Homework 4-NEELAM(sep 9).ipynb
neelam937/Trichandra-multiple-campus
96a97168b572d17d22ae6b25394bc0758d44ec53
[ "MIT" ]
1
2020-09-16T09:17:27.000Z
2020-09-16T09:17:27.000Z
Homework 4-NEELAM(sep 9).ipynb
neelam937/Trichandra-multiple-campus
96a97168b572d17d22ae6b25394bc0758d44ec53
[ "MIT" ]
null
null
null
Homework 4-NEELAM(sep 9).ipynb
neelam937/Trichandra-multiple-campus
96a97168b572d17d22ae6b25394bc0758d44ec53
[ "MIT" ]
null
null
null
300.809069
54,160
0.848555
[ [ [ "## TASK-1: Make a class to calculate the range, time of flight and horizontal range of the projectile fired from the ground.", "_____no_output_____" ], [ "## TASK-2: Use the list to find the range, time of flight and horizontal range for varying value of angle from 1 degree to 90 dergree.", "_____no_output_____" ], [ "## TASK-3: Make a plot to show the variation of range, time of flight and horizontal range with angle of projection.", "_____no_output_____" ], [ "## TASK-4: Change the list of [angle], [range], [time of flight] and [horizontal range] into dictionary and finely into dataframe using pandas. Save the file in your PC in csv file.", "_____no_output_____" ], [ "### Required formula:", "_____no_output_____" ], [ "### Horizontal range: $R=u^2sin2A/g$", "_____no_output_____" ], [ "### Time of flight: $T = 2usinA/g$", "_____no_output_____" ], [ "### Maximum Height: $H = u^2*sin^2A/2g$", "_____no_output_____" ] ], [ [ "import math\nimport numpy as np", "_____no_output_____" ], [ "class Projectile():\n def __init__(self,u,A,g):\n self.u=u\n self.A=A\n self.g=g\n def HorizontalRange(self):\n R= (self.u^2) * math.sin(2 * self.A * math.pi/180)/ (self.g)\n return R\n def TimeofFlight(self):\n T= (self.u*2) * math.sin(self.A* math.pi/180) / (self.g)\n return T\n def MaximumHeight(self):\n H=(self.u * math.sin(self.A* math.pi/180))**2 / (self.g*2)\n return H\n def update_A(self,A):\n self.A=A\n ", "_____no_output_____" ], [ "u=36 #in m/s\ng=9.8 #in m/s^2\nP = Projectile(36, 0, 9.8 )\nR=[] #empty list to collect horizontal range\nT=[] #empty list to collect the time of flight\nH=[] #empty list to collect the maximum height\nN=[] #empty list to collect angle of projection\nx=np.arange(0,90+0.1,0.1)\nfor i in x:\n N.append(i)\n P.update_A(i)\n r=P.HorizontalRange()\n t=P.TimeofFlight()\n h=P.MaximumHeight()\n R.append(i)\n T.append(t)\n H.append(h)\n", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nplt.subplot(2,2,1)\nplt.plot(N,R)\nplt.xlabel('N')\nplt.ylabel('R')\nplt.title(\"Angle of projection with Horizontal Range\")\nplt.subplot(2,2,2)\nplt.plot(N,T)\nplt.xlabel('N')\nplt.ylabel('T')\nplt.title(\"Angle of projection with Time of Flight\")\nplt.subplot(2,2,3)\nplt.plot(N,H)\nplt.xlabel('N')\nplt.ylabel('H')\nplt.title(\"Angle of projection with Maximum Distance\")", "_____no_output_____" ], [ "data={} #empty list\ndata.update({\"Angle_of_projection\":N,\"Horizontal_Range\":R,\"Time_of_Flight\":T,\"Maximum_Distance\":H})\nprint(data)", "{'Angle_of_projection': [0.0, 0.1, 0.2, 0.30000000000000004, 0.4, 0.5, 0.6000000000000001, 0.7000000000000001, 0.8, 0.9, 1.0, 1.1, 1.2000000000000002, 1.3, 1.4000000000000001, 1.5, 1.6, 1.7000000000000002, 1.8, 1.9000000000000001, 2.0, 2.1, 2.2, 2.3000000000000003, 2.4000000000000004, 2.5, 2.6, 2.7, 2.8000000000000003, 2.9000000000000004, 3.0, 3.1, 3.2, 3.3000000000000003, 3.4000000000000004, 3.5, 3.6, 3.7, 3.8000000000000003, 3.9000000000000004, 4.0, 4.1000000000000005, 4.2, 4.3, 4.4, 4.5, 4.6000000000000005, 4.7, 4.800000000000001, 4.9, 5.0, 5.1000000000000005, 5.2, 5.300000000000001, 5.4, 5.5, 5.6000000000000005, 5.7, 5.800000000000001, 5.9, 6.0, 6.1000000000000005, 6.2, 6.300000000000001, 6.4, 6.5, 6.6000000000000005, 6.7, 6.800000000000001, 6.9, 7.0, 7.1000000000000005, 7.2, 7.300000000000001, 7.4, 7.5, 7.6000000000000005, 7.7, 7.800000000000001, 7.9, 8.0, 8.1, 8.200000000000001, 8.3, 8.4, 8.5, 8.6, 8.700000000000001, 8.8, 8.9, 9.0, 9.1, 9.200000000000001, 9.3, 9.4, 9.5, 9.600000000000001, 9.700000000000001, 9.8, 9.9, 10.0, 
…(numeric output truncated for readability: the printed dictionary holds four 901-element lists; 'Angle_of_projection' continues in 0.1-degree steps up to 90.0, 'Horizontal_Range' repeats the same 0.0-90.0 sequence, 'Time_of_Flight' rises monotonically to 7.346938775510203, and 'Maximum_Distance' rises to 66.12244897959184)…]}\n" ], [ "import pandas as pd\nDf=pd.DataFrame(data)\nprint(Df)", "     Angle_of_projection  
Horizontal_Range  Time_of_Flight  Maximum_Distance\n0                    0.0               0.0        0.000000          0.000000\n1                    0.1               0.1        0.012823          0.000201\n2                    0.2               0.2        0.025646          0.000806\n3                    0.3               0.3        0.038468          0.001813\n4                    0.4               0.4        0.051291          0.003223\n..                   ...               ...             ...               ...\n896                 89.6              89.6        7.346760         66.119226\n897                 89.7              89.7        7.346838         66.120636\n898                 89.8              89.8        7.346894         66.121643\n899                 89.9              89.9        7.346928         66.122248\n900                 90.0              90.0        7.346939         66.122449\n\n[901 rows x 4 columns]\n" ], [ "Df.to_csv('Projectile.csv', index=False)  # index=False keeps a stray unnamed index column out of the file", "_____no_output_____" ], [ "df=pd.read_csv('Projectile.csv')\ndf.head()", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n\nplt.figure(figsize=[10,10])\n\nplt.subplot(2,2,1)\nplt.semilogy(df.Angle_of_projection,df.Horizontal_Range)\nplt.xlabel('Angle of projection (degrees)')\nplt.ylabel('Horizontal Range')\nplt.title('Angle of projection with Horizontal Range')\n\nplt.subplot(2,2,2)\nplt.semilogy(df.Angle_of_projection,df.Time_of_Flight)\nplt.xlabel('Angle of projection (degrees)')\nplt.ylabel('Time of Flight')\nplt.title('Angle of projection with Time of Flight')\n\nplt.subplot(2,2,3)\nplt.semilogy(df.Angle_of_projection,df.Maximum_Distance)\nplt.xlabel('Angle of projection (degrees)')\nplt.ylabel('Maximum Distance')\nplt.title('Angle of projection with Maximum Distance')\n\nplt.tight_layout()\nplt.show()", "_____no_output_____" ] ],
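[ [ "Aside (an added illustration, not part of the original notebook): the table above is consistent with the standard projectile-motion formulas for a launch speed of u = 36 m/s and g = 9.8 m/s^2; for example 2u/g = 7.3469... and u^2/(2g) = 66.1224... reproduce the last row exactly. A minimal sketch that rebuilds the time-of-flight and maximum-height columns under those assumptions:\n\n```\nimport numpy as np\nimport pandas as pd\n\nu, g = 36.0, 9.8                     # launch speed (m/s) and gravity (m/s^2), inferred from the table\ntheta_deg = np.arange(0, 90.1, 0.1)  # 901 angles from 0.0 to 90.0 in 0.1-degree steps\ntheta = np.radians(theta_deg)\n\nsketch = pd.DataFrame({\n    'Angle_of_projection': theta_deg,\n    'Time_of_Flight': 2 * u * np.sin(theta) / g,            # T = 2u*sin(theta)/g\n    'Maximum_Distance': u**2 * np.sin(theta)**2 / (2 * g),  # H = u^2*sin^2(theta)/(2g)\n})\n```\n\nNote that the notebook's own 'Horizontal_Range' column simply repeats the angle values; the usual range formula would be R = u^2*sin(2*theta)/g.\n", "_____no_output_____" ] ] ]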
[ "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a7b4b08c714a17196c10f3665d84afbac5c8c75
39105
ipynb
Jupyter Notebook
Course_1_Part_6_Lesson_2_Notebook.ipynb
kaburelabs/Tensorflow2.0-coursera
9af468b2c90fbaa3a1e53de2238322c8f436cb6a
[ "MIT" ]
2
2020-02-20T03:49:44.000Z
2020-05-23T05:38:56.000Z
Course_1_Part_6_Lesson_2_Notebook.ipynb
kaburelabs/Tensorflow2.0-coursera
9af468b2c90fbaa3a1e53de2238322c8f436cb6a
[ "MIT" ]
null
null
null
Course_1_Part_6_Lesson_2_Notebook.ipynb
kaburelabs/Tensorflow2.0-coursera
9af468b2c90fbaa3a1e53de2238322c8f436cb6a
[ "MIT" ]
1
2020-02-20T03:49:43.000Z
2020-02-20T03:49:43.000Z
79.643585
16606
0.715868
[ [ [ "#Improving Computer Vision Accuracy using Convolutions\n\nIn the previous lessons you saw how to do fashion recognition using a Deep Neural Network (DNN) containing three layers -- the input layer (in the shape of the data), the output layer (in the shape of the desired output) and a hidden layer. You experimented with the impact of different sized of hidden layer, number of training epochs etc on the final accuracy.\n\nFor convenience, here's the entire code again. Run it and take a note of the test accuracy that is printed out at the end. ", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nmnist = tf.keras.datasets.fashion_mnist\n(training_images, training_labels), (test_images, test_labels) = mnist.load_data()\ntraining_images=training_images / 255.0\ntest_images=test_images / 255.0\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(128, activation=tf.nn.relu),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n])\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\nmodel.fit(training_images, training_labels, epochs=5)\n\ntest_loss = model.evaluate(test_images, test_labels)", "Epoch 1/5\n60000/60000==============================] - 4s 74us/sample - loss: 0.4989 - acc: 0.8252\nEpoch 2/5\n60000/60000==============================] - 3s 56us/sample - loss: 0.3745 - acc: 0.8652\nEpoch 3/5\n60000/60000==============================] - 3s 55us/sample - loss: 0.3378 - acc: 0.8769\nEpoch 4/5\n60000/60000==============================] - 3s 55us/sample - loss: 0.3126 - acc: 0.8854\nEpoch 5/5\n60000/60000==============================] - 3s 55us/sample - loss: 0.2943 - acc: 0.8915\n10000/10000==============================] - 0s 39us/sample - loss: 0.3594 - acc: 0.8744\n" ] ], [ [ "Your accuracy is probably about 89% on training and 87% on validation...not bad...But how do you make that even better? One way is to use something called Convolutions. I'm not going to details on Convolutions here, but the ultimate concept is that they narrow down the content of the image to focus on specific, distinct, details. \n\nIf you've ever done image processing using a filter (like this: https://en.wikipedia.org/wiki/Kernel_(image_processing)) then convolutions will look very familiar.\n\nIn short, you take an array (usually 3x3 or 5x5) and pass it over the image. By changing the underlying pixels based on the formula within that matrix, you can do things like edge detection. So, for example, if you look at the above link, you'll see a 3x3 that is defined for edge detection where the middle cell is 8, and all of its neighbors are -1. In this case, for each pixel, you would multiply its value by 8, then subtract the value of each neighbor. Do this for every pixel, and you'll end up with a new image that has the edges enhanced.\n\nThis is perfect for computer vision, because often it's features that can get highlighted like this that distinguish one item for another, and the amount of information needed is then much less...because you'll just train on the highlighted features.\n\nThat's the concept of Convolutional Neural Networks. Add some layers to do convolution before you have the dense layers, and then the information going to the dense layers is more focussed, and possibly more accurate.\n\nRun the below code -- this is the same neural network as earlier, but this time with Convolutional layers added first. 
Run the below code -- this is the same neural network as earlier, but this time with Convolutional layers added first. It will take longer, but look at the impact on the accuracy:", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nprint(tf.__version__)\nmnist = tf.keras.datasets.fashion_mnist\n(training_images, training_labels), (test_images, test_labels) = mnist.load_data()\ntraining_images=training_images.reshape(60000, 28, 28, 1)\ntraining_images=training_images / 255.0\ntest_images = test_images.reshape(10000, 28, 28, 1)\ntest_images=test_images/255.0\nmodel = tf.keras.models.Sequential([\n  tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=(28, 28, 1)),\n  tf.keras.layers.MaxPooling2D(2, 2),\n  tf.keras.layers.Conv2D(64, (3,3), activation='relu'),\n  tf.keras.layers.MaxPooling2D(2,2),\n  tf.keras.layers.Flatten(),\n  tf.keras.layers.Dense(128, activation='relu'),\n  tf.keras.layers.Dense(10, activation='softmax')\n])\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\nmodel.summary()\nmodel.fit(training_images, training_labels, epochs=5)\ntest_loss = model.evaluate(test_images, test_labels)\n", "1.12.0\nModel: \"sequential_3\"\n_________________________________________________________________\nLayer (type)                 Output Shape              Param #   \n=================================================================\nconv2d_6 (Conv2D)            (None, 26, 26, 64)        640       \n_________________________________________________________________\nmax_pooling2d_4 (MaxPooling2 (None, 13, 13, 64)        0         \n_________________________________________________________________\nconv2d_7 (Conv2D)            (None, 11, 11, 64)        36928     \n_________________________________________________________________\nmax_pooling2d_5 (MaxPooling2 (None, 5, 5, 64)          0         \n_________________________________________________________________\nflatten_3 (Flatten)          (None, 1600)              0         \n_________________________________________________________________\ndense_6 (Dense)              (None, 128)               204928    \n_________________________________________________________________\ndense_7 (Dense)              (None, 10)                1290      \n=================================================================\nTotal params: 243,786\nTrainable params: 243,786\nNon-trainable params: 0\n_________________________________________________________________\nEpoch 1/5\n60000/60000 [==============================] - 6s 95us/sample - loss: 0.4325 - acc: 0.8411\nEpoch 2/5\n60000/60000 [==============================] - 6s 92us/sample - loss: 0.2930 - acc: 0.8914\nEpoch 3/5\n60000/60000 [==============================] - 5s 91us/sample - loss: 0.2463 - acc: 0.9079\nEpoch 4/5\n60000/60000 [==============================] - 5s 90us/sample - loss: 0.2156 - acc: 0.9187\nEpoch 5/5\n60000/60000 [==============================] - 6s 92us/sample - loss: 0.1874 - acc: 0.9307\n10000/10000 [==============================] - 0s 42us/sample - loss: 0.2589 - acc: 0.9089\n" ] ],
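[ [ "Aside (an added note, not from the original lesson): you can verify the shapes in the summary above by hand. A 3x3 convolution with no padding trims one pixel from each border, so 28x28 becomes 26x26; a 2x2 max-pool halves each side to 13x13; the second convolution gives 11x11, pooled down to 5x5; and flattening 5x5x64 feature maps yields the 1600 values feeding the dense layer. The parameter counts follow the same logic: the first Conv2D has (3*3*1 + 1) * 64 = 640 parameters, the second has (3*3*64 + 1) * 64 = 36928, and the dense layer has 1600 * 128 + 128 = 204928.", "_____no_output_____" ] ],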
[ [ "It's likely gone up to about 93% on the training data and 91% on the validation data. \n\nThat's significant, and a step in the right direction!\n\nTry running it for more epochs -- say about 20 -- and explore the results! But while the results might seem really good, the validation results may actually go down, due to something called 'overfitting', which will be discussed later. \n\n(In a nutshell, 'overfitting' occurs when the network learns the data from the training set really well, but it's too specialised to only that data, and as a result is less effective at seeing *other* data. For example, if all your life you only saw red shoes, then when you see a red shoe you would be very good at identifying it, but blue suede shoes might confuse you...and you know you should never mess with my blue suede shoes.)\n\nThen, look at the code again, and see, step by step, how the Convolutions were built:", "_____no_output_____" ], [ "Step 1 is to gather the data. You'll notice that there's a bit of a change here in that the training data needed to be reshaped. That's because the first convolution expects a single tensor containing everything, so instead of 60,000 28x28x1 items in a list, we have a single 4D list that is 60,000x28x28x1, and the same for the test images. If you don't do this, you'll get an error when training as the Convolutions do not recognize the shape. \n\n\n\n```\nimport tensorflow as tf\nmnist = tf.keras.datasets.fashion_mnist\n(training_images, training_labels), (test_images, test_labels) = mnist.load_data()\ntraining_images=training_images.reshape(60000, 28, 28, 1)\ntraining_images=training_images / 255.0\ntest_images = test_images.reshape(10000, 28, 28, 1)\ntest_images=test_images/255.0\n```\n\n", "_____no_output_____" ], [ "Next is to define your model. Now instead of the input layer at the top, you're going to add a Convolution. The parameters are:\n\n1. The number of convolutions you want to generate. Purely arbitrary, but good to start with something in the order of 32\n2. The size of the Convolution, in this case a 3x3 grid\n3. The activation function to use -- in this case we'll use relu, which you might recall is the equivalent of returning x when x>0, else returning 0\n4. In the first layer, the shape of the input data.\n\nYou'll follow the Convolution with a MaxPooling layer, which is designed to compress the image while maintaining the content of the features that were highlighted by the convolution. By specifying (2,2) for the MaxPooling, the effect is to quarter the size of the image. Without going into too much detail here, the idea is that it creates a 2x2 array of pixels and picks the biggest one, thus turning 4 pixels into 1 (a small worked example is added after the next cell). It repeats this across the image, and in so doing halves the number of horizontal pixels and halves the number of vertical pixels, effectively reducing the image to 25% of its original size.\n\nYou can call model.summary() to see the size and shape of the network, and you'll notice that after every MaxPooling layer, the image size is reduced in this way. \n\n\n```\nmodel = tf.keras.models.Sequential([\n  tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(28, 28, 1)),\n  tf.keras.layers.MaxPooling2D(2, 2),\n```\n\n", "_____no_output_____" ], [ "Add another convolution\n\n\n\n```\n  tf.keras.layers.Conv2D(64, (3,3), activation='relu'),\n  tf.keras.layers.MaxPooling2D(2,2)\n```\n\n", "_____no_output_____" ],
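[ "Aside (an added illustration, not from the original lesson): here is what that 2x2 max-pooling rule does to a single 4x4 array, using plain numpy so you can see the arithmetic:\n\n```\nimport numpy as np\n\nx = np.array([[1, 3, 2, 4],\n              [5, 6, 1, 2],\n              [7, 2, 9, 1],\n              [3, 4, 8, 5]])\n\n# Split into non-overlapping 2x2 blocks and keep the max of each block\npooled = x.reshape(2, 2, 2, 2).max(axis=(1, 3))\nprint(pooled)   # [[6 4]\n                #  [7 9]]\n```\n\nEach 2x2 block collapses to its largest value, so a 4x4 input becomes 2x2, which is exactly the size reduction you will see in model.summary().\n", "_____no_output_____" ],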
", "_____no_output_____" ], [ "Add another convolution\n\n\n\n```\n tf.keras.layers.Conv2D(64, (3,3), activation='relu'),\n tf.keras.layers.MaxPooling2D(2,2)\n```\n\n", "_____no_output_____" ], [ "Now flatten the output. After this you'll just have the same DNN structure as the non-convolutional version\n\n```\n tf.keras.layers.Flatten(),\n```\n\n", "_____no_output_____" ], [ "The same 128-neuron dense layer and 10-neuron output layer as in the pre-convolution example:\n\n\n\n```\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dense(10, activation='softmax')\n])\n```\n\n", "_____no_output_____" ], [ "Now compile the model, call the fit method to do the training, and evaluate the loss and accuracy from the test set.\n\n\n\n```\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\nmodel.fit(training_images, training_labels, epochs=5)\ntest_loss, test_acc = model.evaluate(test_images, test_labels)\nprint(test_acc)\n```\n\n\n", "_____no_output_____" ], [ "# Visualizing the Convolutions and Pooling\n\nThis code will show us the convolutions graphically. The print(test_labels[:100]) call shows us the first 100 labels in the test set, and you can see that the ones at index 0, index 23 and index 28 are all the same value (9). They're all shoes. Let's take a look at the result of running the convolution on each, and you'll begin to see common features between them emerge. Now, when the DNN is training on that data, it's working with a lot less, and it's perhaps finding a commonality between shoes based on this convolution/pooling combination.", "_____no_output_____" ] ], [ [ "print(test_labels[:100])", "[9 2 1 1 6 1 4 6 5 7 4 5 7 3 4 1 2 4 8 0 2 5 7 9 1 4 6 0 9 3 8 8 3 3 8 0 7\n 5 7 9 6 1 3 7 6 7 2 1 2 2 4 4 5 8 2 2 8 4 8 0 7 7 8 5 1 1 2 3 9 8 7 0 2 6\n 2 3 1 2 8 4 1 8 5 9 5 0 3 2 0 6 5 3 6 7 1 8 0 1 4 2]\n" ], [ "import matplotlib.pyplot as plt\nf, axarr = plt.subplots(3,4)\nFIRST_IMAGE=0\nSECOND_IMAGE=7\nTHIRD_IMAGE=26\nCONVOLUTION_NUMBER = 1\nfrom tensorflow.keras import models\nlayer_outputs = [layer.output for layer in model.layers]\nactivation_model = tf.keras.models.Model(inputs = model.input, outputs = layer_outputs)\nfor x in range(0,4):\n f1 = activation_model.predict(test_images[FIRST_IMAGE].reshape(1, 28, 28, 1))[x]\n axarr[0,x].imshow(f1[0, : , :, CONVOLUTION_NUMBER], cmap='inferno')\n axarr[0,x].grid(False)\n f2 = activation_model.predict(test_images[SECOND_IMAGE].reshape(1, 28, 28, 1))[x]\n axarr[1,x].imshow(f2[0, : , :, CONVOLUTION_NUMBER], cmap='inferno')\n axarr[1,x].grid(False)\n f3 = activation_model.predict(test_images[THIRD_IMAGE].reshape(1, 28, 28, 1))[x]\n axarr[2,x].imshow(f3[0, : , :, CONVOLUTION_NUMBER], cmap='inferno')\n axarr[2,x].grid(False)", "_____no_output_____" ] ], [ [ "EXERCISES\n\n1. Try editing the convolutions. Change the 32s to either 16 or 64. What impact will this have on accuracy and/or training time?\n\n2. Remove the final Convolution. What impact will this have on accuracy or training time?\n\n3. How about adding more Convolutions? What impact do you think this will have? Experiment with it.\n\n4. Remove all Convolutions but the first. What impact do you think this will have? Experiment with it. \n\n5. In the previous lesson you implemented a callback to check on the loss function and to cancel training once it hit a certain amount. 
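As a hint, one possible shape for such a callback is sketched below (the 99% threshold and the 'acc' log key are assumptions based on this notebook's TF 1.x training logs):\n\n```\nclass StopTraining(tf.keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs={}):\n # stop once training accuracy passes the (hypothetical) threshold\n if logs.get('acc', 0) > 0.99:\n print('\\nReached 99% accuracy, cancelling training!')\n self.model.stop_training = True\n```\n\n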
See if you can implement that here!", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nprint(tf.__version__)\nmnist = tf.keras.datasets.mnist\n(training_images, training_labels), (test_images, test_labels) = mnist.load_data()\n\ntraining_images=training_images.reshape(60000, 28, 28, 1)\ntraining_images=training_images / 255.0\ntest_images = test_images.reshape(10000, 28, 28, 1)\ntest_images=test_images/255.0\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(28, 28, 1)),\n tf.keras.layers.MaxPooling2D(2, 2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dense(10, activation='softmax')\n])\n\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\nmodel.fit(training_images, training_labels, epochs=10)\ntest_loss, test_acc = model.evaluate(test_images, test_labels)\n\nprint(test_acc)", "1.12.0\nEpoch 1/10\n60000/60000==============================] - 6s 104us/sample - loss: 0.1510 - acc: 0.9551\nEpoch 2/10\n60000/60000==============================] - 5s 79us/sample - loss: 0.0512 - acc: 0.9843\nEpoch 3/10\n60000/60000==============================] - 5s 77us/sample - loss: 0.0319 - acc: 0.9902\nEpoch 4/10\n60000/60000==============================] - 5s 78us/sample - loss: 0.0209 - acc: 0.9934\nEpoch 5/10\n60000/60000==============================] - 5s 78us/sample - loss: 0.0136 - acc: 0.9956\nEpoch 6/10\n60000/60000==============================] - 5s 78us/sample - loss: 0.0111 - acc: 0.9964\nEpoch 7/10\n60000/60000==============================] - 5s 79us/sample - loss: 0.0076 - acc: 0.9974\nEpoch 8/10\n60000/60000==============================] - 5s 78us/sample - loss: 0.0052 - acc: 0.9985\nEpoch 9/10\n60000/60000==============================] - 5s 81us/sample - loss: 0.0046 - acc: 0.9988\nEpoch 10/10\n60000/60000==============================] - 5s 81us/sample - loss: 0.0053 - acc: 0.9981\n10000/10000==============================] - 1s 53us/sample - loss: 0.0583 - acc: 0.9873\n0.9873\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
4a7b54ad0e6ead5313473ab1e2f8dc39371a742e
18,903
ipynb
Jupyter Notebook
week07_seq2seq/captioning_theano.ipynb
ftju/Practical_DL
b45d53949d5cb08f7d604fe34c83556a1c2cc65b
[ "MIT" ]
1,091
2017-09-11T09:26:41.000Z
2022-03-31T12:25:48.000Z
week07_seq2seq/captioning_theano.ipynb
ftju/Practical_DL
b45d53949d5cb08f7d604fe34c83556a1c2cc65b
[ "MIT" ]
67
2018-02-14T13:40:57.000Z
2021-05-27T13:06:38.000Z
week07_seq2seq/captioning_theano.ipynb
ftju/Practical_DL
b45d53949d5cb08f7d604fe34c83556a1c2cc65b
[ "MIT" ]
585
2017-09-12T13:34:29.000Z
2022-03-29T18:28:39.000Z
28.554381
154
0.579908
[ [ [ "DeepLarning Couse HSE 2016 fall: \n* Arseniy Ashuha, you can text me ```[email protected]```,\n* ```https://vk.com/ars.ashuha``` \n* partially reusing https://github.com/ebenolson/pydata2015\n\n<h1 align=\"center\"> Image Captioning </h1> \n\nIn this seminar you'll be going through the image captioning pipeline.\n\nTo begin with, let us download the dataset of image features from a pre-trained GoogleNet.\n\n", "_____no_output_____" ] ], [ [ "!wget https://www.dropbox.com/s/3hj16b0fj6yw7cc/data.tar.gz?dl=1 -O data.tar.gz\n!tar -xvzf data.tar.gz", "_____no_output_____" ] ], [ [ "### Data preprocessing", "_____no_output_____" ] ], [ [ "%%time\n# Read Dataset\nimport numpy as np\nimport pickle\n\nimg_codes = np.load(\"data/image_codes.npy\")\ncaptions = pickle.load(open('data/caption_tokens.pcl', 'rb'))", "_____no_output_____" ], [ "print \"each image code is a 1000-unit vector:\", img_codes.shape\nprint img_codes[0,:10]\nprint '\\n\\n'\nprint \"for each image there are 5-7 descriptions, e.g.:\\n\"\nprint '\\n'.join(captions[0])", "_____no_output_____" ], [ "#split descriptions into tokens\nfor img_i in range(len(captions)):\n for caption_i in range(len(captions[img_i])):\n sentence = captions[img_i][caption_i] \n captions[img_i][caption_i] = [\"#START#\"]+sentence.split(' ')+[\"#END#\"]\n", "_____no_output_____" ], [ "# Build a Vocabulary\nfrom collections import Counter\nword_counts = Counter()\n\n<Compute word frequencies for each word in captions. See code above for data structure>\n\n", "_____no_output_____" ], [ "vocab = ['#UNK#', '#START#', '#END#']\nvocab += [k for k, v in word_counts.items() if v >= 5]\nn_tokens = len(vocab)\n\nassert 10000 <= n_tokens <= 10500\n\nword_to_index = {w: i for i, w in enumerate(vocab)}", "_____no_output_____" ], [ "PAD_ix = -1\nUNK_ix = vocab.index('#UNK#')\n\n#good old as_matrix for the third time\ndef as_matrix(sequences,max_len=None):\n max_len = max_len or max(map(len,sequences))\n \n matrix = np.zeros((len(sequences),max_len),dtype='int32')+PAD_ix\n for i,seq in enumerate(sequences):\n row_ix = [word_to_index.get(word,UNK_ix) for word in seq[:max_len]]\n matrix[i,:len(row_ix)] = row_ix\n \n return matrix\n", "_____no_output_____" ], [ "#try it out on several descriptions of a random image\nas_matrix(captions[1337])", "_____no_output_____" ] ], [ [ "### Mah Neural Network", "_____no_output_____" ] ], [ [ "# network shapes. \nCNN_FEATURE_SIZE = img_codes.shape[1]\nEMBED_SIZE = 128 #pls change me if u want\nLSTM_UNITS = 200 #pls change me if u want", "_____no_output_____" ], [ "import theano\nimport theano.tensor as T", "_____no_output_____" ], [ "# Input Variable\nsentences = T.imatrix()# [batch_size x time] of word ids\nimage_vectors = T.matrix() # [batch size x unit] of CNN image features\nsentence_mask = T.neq(sentences,PAD_ix)", "_____no_output_____" ], [ "import lasagne\nfrom lasagne.layers import *", "_____no_output_____" ], [ "#network inputs\nl_words = InputLayer((None,None),sentences )\nl_mask = InputLayer((None,None),sentence_mask )\n\n#embeddings for words \nl_word_embeddings = <apply word embedding. use EMBED_SIZE>\n\n#cudos for using some pre-trained embedding :)", "_____no_output_____" ], [ "# input layer for image features\nl_image_features = InputLayer((None,CNN_FEATURE_SIZE),image_vectors )\n\n#convert 1000 image features from googlenet to whatever LSTM_UNITS you have set\n#it's also a good idea to add some dropout here and there\nl_image_features_small = <convert l_image features to a shape equal to rnn hidden state. 
Also play with dropout/noise>\n\nassert l_image_features_small.output_shape == (None,LSTM_UNITS)", "_____no_output_____" ], [ "# Concatenate image features and word embeddings in one sequence \ndecoder = a recurrent layer (gru/lstm) with the following checklist:\n# * takes word embeddings as an input\n# * has LSTM_UNITS units in the final layer\n# * has cell_init (or hid_init for gru) set to converted image features\n# * mask_input = input_mask\n# * don't forget the grad clipping (~5-10)\n\n#find out better recurrent architectures for bonus point", "_____no_output_____" ], [ "# Decoding of rnn hidden states\nfrom broadcast import BroadcastLayer,UnbroadcastLayer\n\n#apply whatever comes next to each tick of each example in a batch. Equivalent to 2 reshapes\nbroadcast_decoder_ticks = BroadcastLayer(decoder,(0,1))\nprint \"broadcasted decoder shape = \",broadcast_decoder_ticks.output_shape", "_____no_output_____" ], [ "#predict probabilities for next tokens\npredicted_probabilities_each_tick = <predict probabilities for each tick, using broadcast_decoder_ticks as an input. No reshaping needed here.>\n# maybe a more complicated architecture will work better?", "_____no_output_____" ], [ "#un-broadcast back into (batch,tick,probabilities)\npredicted_probabilities = UnbroadcastLayer(predicted_probabilities_each_tick,\n broadcast_layer=broadcast_decoder_ticks)\n\nprint \"output shape = \",predicted_probabilities.output_shape\n\n#remove if you know what you're doing (e.g. 1d convolutions or fixed shape)\nassert predicted_probabilities.output_shape == (None, None, 10373)", "_____no_output_____" ] ], [ [ "### Some tricks\n* If you train a large network, it is usually a good idea to make a 2-stage prediction (see the sketch below)\n 1. (large recurrent state) -> (bottleneck e.g. 256)\n 2. (bottleneck) -> (vocabulary size)\n * this way you won't need to store/train a (large_recurrent_state x vocabulary size) matrix\n* Also maybe use Hierarchical Softmax?\n * https://gist.github.com/justheuristic/581853c6d6b87eae9669297c2fb1052d
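\n\nFor illustration, one possible shape of the two-stage head in lasagne (a sketch with an assumed bottleneck of 256 units -- the layer names are ours, and this is not the only valid design) is:\n\n```\nbottleneck = DenseLayer(broadcast_decoder_ticks, 256, nonlinearity=None)\nprobs_each_tick = DenseLayer(bottleneck, n_tokens,\n nonlinearity=lasagne.nonlinearities.softmax)\n```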
\n\n\n", "_____no_output_____" ] ], [ [ "next_word_probas = <get network output>\n\n\npredictions_flat = next_word_probas[:,:-1].reshape((-1,n_tokens))\nreference_answers = sentences[:,1:].reshape((-1,))\n\n#write symbolic loss function to minimize over NN params\nloss = <compute elementwise loss function>", "_____no_output_____" ], [ "#trainable NN weights\nweights = get_all_params(predicted_probabilities,trainable=True)\nupdates = <parameter updates using your favorite algorithm>", "_____no_output_____" ], [ "#compile functions for training and evaluation\n#please note that your functions must accept image features as FIRST param and sentences as second one\ntrain_step = <function that takes input sentence and image mask, outputs loss and updates weights>\nval_step = <function that takes input sentence and image mask and outputs loss>\n#for val_step use deterministic=True if you have any dropout/noise", "_____no_output_____" ] ], [ [ "# Training\n\n* You first have to implement a batch generator\n* Then the network will get trained the usual way", "_____no_output_____" ] ], [ [ "captions = np.array(captions)", "_____no_output_____" ], [ "from random import choice\n\ndef generate_batch(images,captions,batch_size,max_caption_len=None):\n \n #sample random numbers for image/caption indices\n random_image_ix = np.random.randint(0,len(images),size=batch_size)\n \n #get images\n batch_images = images[random_image_ix]\n \n #5-7 captions for each image\n captions_for_batch_images = captions[random_image_ix]\n \n #pick 1 from 5-7 captions for each image\n batch_captions = map(choice,captions_for_batch_images)\n \n #convert to matrix\n batch_captions_ix = as_matrix(batch_captions,max_len=max_caption_len)\n \n return batch_images, batch_captions_ix\n ", "_____no_output_____" ], [ "generate_batch(img_codes,captions,3)", "_____no_output_____" ] ], [ [ "### Main loop\n* We recommend you to periodically evaluate the network using the next \"apply trained model\" block\n * it's safe to interrupt training, run a few examples and start training again", "_____no_output_____" ] ], [ [ "batch_size=50 #adjust me\nn_epochs=100 #adjust me\nn_batches_per_epoch = 50 #adjust me\nn_validation_batches = 5 #how many batches are used for validation after each epoch\n", "_____no_output_____" ], [ "from tqdm import tqdm\n\nfor epoch in range(n_epochs):\n \n train_loss=0\n for _ in tqdm(range(n_batches_per_epoch)):\n train_loss += train_step(*generate_batch(img_codes,captions,batch_size))\n train_loss /= n_batches_per_epoch\n \n val_loss=0\n for _ in range(n_validation_batches):\n val_loss += val_step(*generate_batch(img_codes,captions,batch_size))\n val_loss /= n_validation_batches\n \n print('\\nEpoch: {}, train loss: {}, val loss: {}'.format(epoch, train_loss, val_loss))\n\nprint(\"Finish :)\")", "_____no_output_____" ] ], [ [ "### apply trained model", "_____no_output_____" ] ], [ [ "#the same kind you did last week, but a bit smaller\nfrom pretrained_lenet import build_model,preprocess,MEAN_VALUES\n\n# build googlenet\nlenet = build_model()\n\n#load weights\nlenet_weights = pickle.load(open('data/blvc_googlenet.pkl'))['param values']\n#python3: pickle.load(open('data/blvc_googlenet.pkl', 'rb'), encoding='latin1')['param values']\n\n\nset_all_param_values(lenet[\"prob\"], 
lenet_weights)\n\n#compile get_features\ncnn_input_var = lenet['input'].input_var\ncnn_feature_layer = lenet['loss3/classifier']\nget_cnn_features = theano.function([cnn_input_var], lasagne.layers.get_output(cnn_feature_layer))", "_____no_output_____" ], [ "from matplotlib import pyplot as plt\n%matplotlib inline\n\n#sample image\nimg = plt.imread('data/Dog-and-Cat.jpg')\nimg = preprocess(img)", "_____no_output_____" ], [ "#deprocess and show, one line :)\nfrom pretrained_lenet import MEAN_VALUES\nplt.imshow(np.transpose((img[0] + MEAN_VALUES)[::-1],[1,2,0]).astype('uint8'))", "_____no_output_____" ] ], [ [ "## Generate caption", "_____no_output_____" ] ], [ [ "last_word_probas = <get network-predicted probas at last tick>\n#TRY OUT deterministic=True if you want more steady results\n\nget_probs = theano.function([image_vectors,sentences], last_word_probas)\n\n#this is exactly the generation function from week5 classwork,\n#except now we condition on image features instead of words\ndef generate_caption(image,caption_prefix = (\"#START#\",),t=1,sample=True,max_len=100):\n image_features = get_cnn_features(image)\n caption = list(caption_prefix)\n for _ in range(max_len):\n \n next_word_probs = <obtain probabilities for next words>\n assert len(next_word_probs.shape) ==1 #must be one-dimensional\n #apply temperature\n next_word_probs = next_word_probs**t / np.sum(next_word_probs**t)\n\n if sample:\n next_word = np.random.choice(vocab,p=next_word_probs) \n else:\n next_word = vocab[np.argmax(next_word_probs)]\n\n caption.append(next_word)\n\n if next_word==\"#END#\":\n break\n \n return caption", "_____no_output_____" ], [ "for i in range(10):\n print ' '.join(generate_caption(img,t=5.)[1:-1])", "_____no_output_____" ] ], [ [ "# Demo\n### Find at least 10 images to test it on.\n* Seriously, that's part of the assignment. Go get at least 10 pictures to get captioned\n* Make sure it works okay on __simple__ images before going to something more complex\n* Photos, not animation/3d/drawings, unless you want to train the CNN on anime\n* Mind the aspect ratio (see what `preprocess` does to your image)", "_____no_output_____" ] ], [ [ "#apply your network on the image sample you found\n#\n#", "_____no_output_____" ] ], [ [ "# grading\n\n* base 5 if it compiles and trains without exploding\n* +1 for finding a representative set of reference examples\n* +2 for providing 10+ examples where the network provides reasonable captions (at least sometimes :) )\n * you may want to predict with sample=False and deterministic=True for consistent results\n * kudos for submitting network params that reproduce it\n* +2 for providing 10+ examples where the network fails IF you also got the previous 10 examples right\n\n\n* bonus points for experiments with architecture and initialization (see above)\n* bonus points for trying out other pre-trained nets for captioning\n* a whole lot of bonus points if you also train via metric learning\n * image -> vec\n * caption -> vec (encoder, not decoder)\n * loss = correct captions must be closer, wrong ones must be farther\n * prediction = choose the caption that is closest to the image\n* a freaking whole lot of points if you also obtain statistically significant results the other way round\n * take a caption, get the closest image", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a7b65800eb0020e28963066326f0e274d384e37
142,975
ipynb
Jupyter Notebook
Google colab notes/Outfit_neural_network.ipynb
ZinnurovArtur/Colour-Mathcer
fc0d85618b06fbdb48c9a15eec5da7ebd99e07ad
[ "MIT" ]
null
null
null
Google colab notes/Outfit_neural_network.ipynb
ZinnurovArtur/Colour-Mathcer
fc0d85618b06fbdb48c9a15eec5da7ebd99e07ad
[ "MIT" ]
null
null
null
Google colab notes/Outfit_neural_network.ipynb
ZinnurovArtur/Colour-Mathcer
fc0d85618b06fbdb48c9a15eec5da7ebd99e07ad
[ "MIT" ]
null
null
null
156.770833
59,982
0.794663
[ [ [ "<a href=\"https://colab.research.google.com/github/ZinnurovArtur/Colour-Match/blob/main/Outfit_neural_network.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "from sklearn.cluster import KMeans\nimport matplotlib.pyplot as plt\nimport numpy as np \nimport cv2\nimport random\nfrom collections import Counter\nfrom tensorflow.keras.models import load_model\n\n\n\nimport tensorflow as tf\nfrom tensorflow.keras.models import *\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras import losses\nfrom tensorflow.keras.optimizers import Adam\n\nimport os\n%matplotlib inline", "_____no_output_____" ], [ "\nfrom google.colab import drive\nfrom keras.datasets import fashion_mnist\n\ndrive.mount('/content/drive')\n\n", "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n" ], [ "image = cv2.imread('/content/drive/MyDrive/Colab Notebooks/datasets/pictures_outfit/bauman/bauman-yeallow.jpg')\n\nplt.imshow(image)", "_____no_output_____" ], [ "path = \"/content/drive/MyDrive/datasetTemp/\"", "_____no_output_____" ], [ "\noriginals = []\nimages_toget = []\n\nmean = np.zeros((224,224,3))\nnumber_ofim = 0\n\n\n\nfor filename in os.listdir(path+\"original2/\"):\n if (filename.endswith(\"png\")):\n number_ofim +=1\n print(filename)\n original = cv2.imread(path+\"original2/\"+ filename)\n original = cv2.resize(original,(224,224))\n originals.append(original)\n\n mean[:,:,0]=mean[:,:,0]+original[:,:,0]\n mean[:,:,1]=mean[:,:,1]+original[:,:,1]\n mean[:,:,2]=mean[:,:,2]+original[:,:,2]\n\nfor filename in os.listdir(path+\"original/\"):\n if (filename.endswith(\"png\")):\n number_ofim +=1\n print(filename)\n original = cv2.imread(path+\"original/\"+ filename)\n original = cv2.resize(original,(224,224))\n originals.append(original)\n\n mean[:,:,0]=mean[:,:,0]+original[:,:,0]\n mean[:,:,1]=mean[:,:,1]+original[:,:,1]\n mean[:,:,2]=mean[:,:,2]+original[:,:,2]\n\n\n\narrDress = []\narrBody = []\nfor filename in os.listdir(path+\"body2/\"):\n if filename.endswith(\"png\"):\n body = path+\"body2/\"+filename\n arrBody.append(body)\n\nfor filename in os.listdir(path+\"dress2/\"):\n if filename.endswith(\"png\"):\n dress = path+\"dress2/\"+filename\n arrDress.append(dress)\n\nfor filename in os.listdir(path+\"body/\"):\n if filename.endswith(\"png\"):\n body = path+\"body/\"+filename\n arrBody.append(body)\n\nfor filename in os.listdir(path+\"dress/\"):\n if filename.endswith(\"png\"):\n dress = path+\"dress/\"+filename\n arrDress.append(dress)\n\nfor i in range(len(arrBody)):\n body = cv2.imread(arrBody[i],0)\n dress = cv2.imread(arrDress[i],0)\n dress[dress == 255] = 0\n dress[dress > 0] = 255\n dress = cv2.resize(dress,(224,224))\n\n body[body == 255] = 0\n body[body > 0] = 255\n body = cv2.resize(body,(224,224))\n\n skin = body - dress\n bg = (255 - body)/255\n skin = (255 - skin)/255\n dress = (255 - dress)/255\n \n gt = np.zeros((224,224,3))\n gt[:,:,0] = (1-skin)\n gt[:,:,1] = (1-dress)\n gt[:,:,2] = bg\n images_toget.append(gt)\n\n \n\nmean = mean / number_ofim\nprint(number_ofim)\nmean = mean.astype('int')", 
"original2.png\noriginal3.png\noriginal5.png\noriginal6.png\noriginal7.png\noriginal8.png\noriginal9.png\noriginal11.png\noriginal13.png\noriginal16.png\noriginal17.png\noriginal18.png\noriginal20.png\noriginal21.png\noriginal24.png\noriginal25.png\noriginal26.png\noriginal29.png\noriginal30.png\noriginal31.png\noriginal32.png\noriginal33.png\noriginal36.png\noriginal39.png\noriginal40.png\noriginal43.png\noriginal44.png\noriginal45.png\noriginal46.png\noriginal48.png\noriginal50.png\noriginal51.png\noriginal53.png\noriginal54.png\noriginal60.png\noriginal3.png\noriginal4.png\noriginal7.png\noriginal8.png\noriginal11.png\noriginal16.png\noriginal17.png\noriginal18.png\noriginal20.png\noriginal26.png\noriginal28.png\n46\n" ], [ "import pickle\n# pixel mean array\npickle.dump(mean, open(path+\"meanArrpixels.pkl\", \"wb\"))", "_____no_output_____" ], [ "def get_unet():\n inputs = Input((None, None, 3))\n conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)\n conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n\n conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)\n conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n\n conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)\n conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n\n conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)\n conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n\n conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)\n conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)\n\n up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)\n conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)\n conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)\n\n up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)\n conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)\n conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)\n\n up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)\n conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)\n conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)\n\n up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)\n conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)\n conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)\n\n conv10 = Conv2D(3, (1, 1), activation='sigmoid')(conv9)\n\n model = Model(inputs=[inputs], outputs=[conv10])\n\n model.compile(optimizer=Adam(lr=1e-3), loss=losses.binary_crossentropy, metrics=['accuracy'])\n\n return model", "_____no_output_____" ], [ "\nmodel = get_unet()\nmodel.summary()\n\nXtrain = np.asarray(originals) - mean.reshape(-1,224,224,3) \nXtest = np.asarray(images_toget).reshape(-1,224,224,3) \nprint(Xtest.shape)", "Model: \"model_2\"\n__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_3 (InputLayer) 
[(None, None, None, 0 \n__________________________________________________________________________________________________\nconv2d_38 (Conv2D) (None, None, None, 3 896 input_3[0][0] \n__________________________________________________________________________________________________\nconv2d_39 (Conv2D) (None, None, None, 3 9248 conv2d_38[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_8 (MaxPooling2D) (None, None, None, 3 0 conv2d_39[0][0] \n__________________________________________________________________________________________________\nconv2d_40 (Conv2D) (None, None, None, 6 18496 max_pooling2d_8[0][0] \n__________________________________________________________________________________________________\nconv2d_41 (Conv2D) (None, None, None, 6 36928 conv2d_40[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_9 (MaxPooling2D) (None, None, None, 6 0 conv2d_41[0][0] \n__________________________________________________________________________________________________\nconv2d_42 (Conv2D) (None, None, None, 1 73856 max_pooling2d_9[0][0] \n__________________________________________________________________________________________________\nconv2d_43 (Conv2D) (None, None, None, 1 147584 conv2d_42[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_10 (MaxPooling2D) (None, None, None, 1 0 conv2d_43[0][0] \n__________________________________________________________________________________________________\nconv2d_44 (Conv2D) (None, None, None, 2 295168 max_pooling2d_10[0][0] \n__________________________________________________________________________________________________\nconv2d_45 (Conv2D) (None, None, None, 2 590080 conv2d_44[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_11 (MaxPooling2D) (None, None, None, 2 0 conv2d_45[0][0] \n__________________________________________________________________________________________________\nconv2d_46 (Conv2D) (None, None, None, 5 1180160 max_pooling2d_11[0][0] \n__________________________________________________________________________________________________\nconv2d_47 (Conv2D) (None, None, None, 5 2359808 conv2d_46[0][0] \n__________________________________________________________________________________________________\nconv2d_transpose_8 (Conv2DTrans (None, None, None, 2 524544 conv2d_47[0][0] \n__________________________________________________________________________________________________\nconcatenate_8 (Concatenate) (None, None, None, 5 0 conv2d_transpose_8[0][0] \n conv2d_45[0][0] \n__________________________________________________________________________________________________\nconv2d_48 (Conv2D) (None, None, None, 2 1179904 concatenate_8[0][0] \n__________________________________________________________________________________________________\nconv2d_49 (Conv2D) (None, None, None, 2 590080 conv2d_48[0][0] \n__________________________________________________________________________________________________\nconv2d_transpose_9 (Conv2DTrans (None, None, None, 1 131200 conv2d_49[0][0] \n__________________________________________________________________________________________________\nconcatenate_9 (Concatenate) (None, None, None, 2 0 conv2d_transpose_9[0][0] \n conv2d_43[0][0] 
\n__________________________________________________________________________________________________\nconv2d_50 (Conv2D) (None, None, None, 1 295040 concatenate_9[0][0] \n__________________________________________________________________________________________________\nconv2d_51 (Conv2D) (None, None, None, 1 147584 conv2d_50[0][0] \n__________________________________________________________________________________________________\nconv2d_transpose_10 (Conv2DTran (None, None, None, 6 32832 conv2d_51[0][0] \n__________________________________________________________________________________________________\nconcatenate_10 (Concatenate) (None, None, None, 1 0 conv2d_transpose_10[0][0] \n conv2d_41[0][0] \n__________________________________________________________________________________________________\nconv2d_52 (Conv2D) (None, None, None, 6 73792 concatenate_10[0][0] \n__________________________________________________________________________________________________\nconv2d_53 (Conv2D) (None, None, None, 6 36928 conv2d_52[0][0] \n__________________________________________________________________________________________________\nconv2d_transpose_11 (Conv2DTran (None, None, None, 3 8224 conv2d_53[0][0] \n__________________________________________________________________________________________________\nconcatenate_11 (Concatenate) (None, None, None, 6 0 conv2d_transpose_11[0][0] \n conv2d_39[0][0] \n__________________________________________________________________________________________________\nconv2d_54 (Conv2D) (None, None, None, 3 18464 concatenate_11[0][0] \n__________________________________________________________________________________________________\nconv2d_55 (Conv2D) (None, None, None, 3 9248 conv2d_54[0][0] \n__________________________________________________________________________________________________\nconv2d_56 (Conv2D) (None, None, None, 3 99 conv2d_55[0][0] \n==================================================================================================\nTotal params: 7,760,163\nTrainable params: 7,760,163\nNon-trainable params: 0\n__________________________________________________________________________________________________\n(46, 224, 224, 3)\n" ], [ "\nmodel = get_unet()\nhistory = model.fit(Xtrain, Xtest, epochs=120)", "Epoch 1/120\n2/2 [==============================] - 2s 198ms/step - loss: 1.1353 - accuracy: 0.5986\nEpoch 2/120\n2/2 [==============================] - 1s 202ms/step - loss: 0.7170 - accuracy: 0.5565\nEpoch 3/120\n2/2 [==============================] - 1s 197ms/step - loss: 0.6248 - accuracy: 0.6769\nEpoch 4/120\n2/2 [==============================] - 1s 199ms/step - loss: 0.5810 - accuracy: 0.6730\nEpoch 5/120\n2/2 [==============================] - 1s 200ms/step - loss: 0.5363 - accuracy: 0.6664\nEpoch 6/120\n2/2 [==============================] - 1s 195ms/step - loss: 0.5192 - accuracy: 0.6574\nEpoch 7/120\n2/2 [==============================] - 1s 194ms/step - loss: 0.5055 - accuracy: 0.6653\nEpoch 8/120\n2/2 [==============================] - 1s 197ms/step - loss: 0.4963 - accuracy: 0.6701\nEpoch 9/120\n2/2 [==============================] - 1s 197ms/step - loss: 0.4989 - accuracy: 0.6730\nEpoch 10/120\n2/2 [==============================] - 1s 196ms/step - loss: 0.4889 - accuracy: 0.6831\nEpoch 11/120\n2/2 [==============================] - 1s 200ms/step - loss: 0.4874 - accuracy: 0.6876\nEpoch 12/120\n2/2 [==============================] - 1s 201ms/step - loss: 0.4875 - accuracy: 0.6749\nEpoch 13/120\n2/2 
[==============================] - 1s 198ms/step - loss: 0.4801 - accuracy: 0.6787\nEpoch 14/120\n2/2 [==============================] - 1s 199ms/step - loss: 0.4685 - accuracy: 0.6963\nEpoch 15/120\n2/2 [==============================] - 1s 199ms/step - loss: 0.4682 - accuracy: 0.6907\nEpoch 16/120\n2/2 [==============================] - 1s 197ms/step - loss: 0.4551 - accuracy: 0.7020\nEpoch 17/120\n2/2 [==============================] - 1s 196ms/step - loss: 0.4667 - accuracy: 0.6900\nEpoch 18/120\n2/2 [==============================] - 1s 199ms/step - loss: 0.4628 - accuracy: 0.7057\nEpoch 19/120\n2/2 [==============================] - 1s 199ms/step - loss: 0.4630 - accuracy: 0.7007\nEpoch 20/120\n2/2 [==============================] - 1s 201ms/step - loss: 0.4482 - accuracy: 0.7043\nEpoch 21/120\n2/2 [==============================] - 1s 201ms/step - loss: 0.4503 - accuracy: 0.7202\nEpoch 22/120\n2/2 [==============================] - 1s 201ms/step - loss: 0.4435 - accuracy: 0.7103\nEpoch 23/120\n2/2 [==============================] - 1s 199ms/step - loss: 0.4436 - accuracy: 0.7142\nEpoch 24/120\n2/2 [==============================] - 1s 199ms/step - loss: 0.4311 - accuracy: 0.7323\nEpoch 25/120\n2/2 [==============================] - 1s 201ms/step - loss: 0.4430 - accuracy: 0.7276\nEpoch 26/120\n2/2 [==============================] - 1s 196ms/step - loss: 0.4390 - accuracy: 0.7195\nEpoch 27/120\n2/2 [==============================] - 1s 204ms/step - loss: 0.4303 - accuracy: 0.7165\nEpoch 28/120\n2/2 [==============================] - 1s 204ms/step - loss: 0.4382 - accuracy: 0.7303\nEpoch 29/120\n2/2 [==============================] - 1s 199ms/step - loss: 0.4219 - accuracy: 0.7492\nEpoch 30/120\n2/2 [==============================] - 1s 201ms/step - loss: 0.4206 - accuracy: 0.7406\nEpoch 31/120\n2/2 [==============================] - 1s 200ms/step - loss: 0.4235 - accuracy: 0.7334\nEpoch 32/120\n2/2 [==============================] - 1s 203ms/step - loss: 0.4198 - accuracy: 0.7505\nEpoch 33/120\n2/2 [==============================] - 1s 201ms/step - loss: 0.4184 - accuracy: 0.7449\nEpoch 34/120\n2/2 [==============================] - 1s 201ms/step - loss: 0.4009 - accuracy: 0.7437\nEpoch 35/120\n2/2 [==============================] - 1s 199ms/step - loss: 0.4409 - accuracy: 0.7475\nEpoch 36/120\n2/2 [==============================] - 1s 200ms/step - loss: 0.4453 - accuracy: 0.6938\nEpoch 37/120\n2/2 [==============================] - 1s 207ms/step - loss: 0.4496 - accuracy: 0.7361\nEpoch 38/120\n2/2 [==============================] - 1s 203ms/step - loss: 0.4364 - accuracy: 0.7495\nEpoch 39/120\n2/2 [==============================] - 1s 207ms/step - loss: 0.3846 - accuracy: 0.7497\nEpoch 40/120\n2/2 [==============================] - 1s 203ms/step - loss: 0.4065 - accuracy: 0.7570\nEpoch 41/120\n2/2 [==============================] - 1s 201ms/step - loss: 0.4336 - accuracy: 0.7128\nEpoch 42/120\n2/2 [==============================] - 1s 208ms/step - loss: 0.4202 - accuracy: 0.7604\nEpoch 43/120\n2/2 [==============================] - 1s 201ms/step - loss: 0.4185 - accuracy: 0.7585\nEpoch 44/120\n2/2 [==============================] - 1s 203ms/step - loss: 0.4018 - accuracy: 0.7624\nEpoch 45/120\n2/2 [==============================] - 1s 204ms/step - loss: 0.3717 - accuracy: 0.7759\nEpoch 46/120\n2/2 [==============================] - 1s 205ms/step - loss: 0.3595 - accuracy: 0.7436\nEpoch 47/120\n2/2 [==============================] - 1s 204ms/step - loss: 0.3432 - accuracy: 
0.7774\nEpoch 48/120\n2/2 [==============================] - 1s 203ms/step - loss: 0.3236 - accuracy: 0.7899\nEpoch 49/120\n2/2 [==============================] - 1s 201ms/step - loss: 0.3343 - accuracy: 0.7767\nEpoch 50/120\n2/2 [==============================] - 1s 206ms/step - loss: 0.3178 - accuracy: 0.8017\nEpoch 51/120\n2/2 [==============================] - 1s 202ms/step - loss: 0.3176 - accuracy: 0.7913\nEpoch 52/120\n2/2 [==============================] - 1s 202ms/step - loss: 0.3346 - accuracy: 0.7740\nEpoch 53/120\n2/2 [==============================] - 1s 203ms/step - loss: 0.3330 - accuracy: 0.7932\nEpoch 54/120\n2/2 [==============================] - 1s 203ms/step - loss: 0.3053 - accuracy: 0.7999\nEpoch 55/120\n2/2 [==============================] - 1s 208ms/step - loss: 0.3082 - accuracy: 0.8037\nEpoch 56/120\n2/2 [==============================] - 1s 201ms/step - loss: 0.2978 - accuracy: 0.8050\nEpoch 57/120\n2/2 [==============================] - 1s 206ms/step - loss: 0.2887 - accuracy: 0.8144\nEpoch 58/120\n2/2 [==============================] - 1s 205ms/step - loss: 0.2895 - accuracy: 0.8106\nEpoch 59/120\n2/2 [==============================] - 1s 202ms/step - loss: 0.2716 - accuracy: 0.8168\nEpoch 60/120\n2/2 [==============================] - 1s 206ms/step - loss: 0.2823 - accuracy: 0.8160\nEpoch 61/120\n2/2 [==============================] - 1s 202ms/step - loss: 0.2642 - accuracy: 0.8174\nEpoch 62/120\n2/2 [==============================] - 1s 206ms/step - loss: 0.2548 - accuracy: 0.8243\nEpoch 63/120\n2/2 [==============================] - 1s 206ms/step - loss: 0.2530 - accuracy: 0.8222\nEpoch 64/120\n2/2 [==============================] - 1s 203ms/step - loss: 0.2512 - accuracy: 0.8252\nEpoch 65/120\n2/2 [==============================] - 1s 206ms/step - loss: 0.2370 - accuracy: 0.8346\nEpoch 66/120\n2/2 [==============================] - 1s 207ms/step - loss: 0.2350 - accuracy: 0.8369\nEpoch 67/120\n2/2 [==============================] - 1s 206ms/step - loss: 0.2250 - accuracy: 0.8417\nEpoch 68/120\n2/2 [==============================] - 1s 205ms/step - loss: 0.2187 - accuracy: 0.8453\nEpoch 69/120\n2/2 [==============================] - 1s 203ms/step - loss: 0.2245 - accuracy: 0.8424\nEpoch 70/120\n2/2 [==============================] - 1s 200ms/step - loss: 0.2187 - accuracy: 0.8467\nEpoch 71/120\n2/2 [==============================] - 1s 202ms/step - loss: 0.2243 - accuracy: 0.8425\nEpoch 72/120\n2/2 [==============================] - 1s 201ms/step - loss: 0.2131 - accuracy: 0.8467\nEpoch 73/120\n2/2 [==============================] - 1s 206ms/step - loss: 0.2074 - accuracy: 0.8513\nEpoch 74/120\n2/2 [==============================] - 1s 201ms/step - loss: 0.2238 - accuracy: 0.8370\nEpoch 75/120\n2/2 [==============================] - 1s 202ms/step - loss: 0.2139 - accuracy: 0.8454\nEpoch 76/120\n2/2 [==============================] - 1s 203ms/step - loss: 0.2095 - accuracy: 0.8519\nEpoch 77/120\n2/2 [==============================] - 1s 204ms/step - loss: 0.2023 - accuracy: 0.8544\nEpoch 78/120\n2/2 [==============================] - 1s 201ms/step - loss: 0.1987 - accuracy: 0.8537\nEpoch 79/120\n2/2 [==============================] - 1s 204ms/step - loss: 0.2011 - accuracy: 0.8608\nEpoch 80/120\n2/2 [==============================] - 1s 201ms/step - loss: 0.1959 - accuracy: 0.8590\nEpoch 81/120\n2/2 [==============================] - 1s 200ms/step - loss: 0.2044 - accuracy: 0.8577\nEpoch 82/120\n2/2 [==============================] - 1s 199ms/step - loss: 
0.1953 - accuracy: 0.8596\nEpoch 83/120\n2/2 [==============================] - 1s 200ms/step - loss: 0.1916 - accuracy: 0.8579\nEpoch 84/120\n2/2 [==============================] - 1s 201ms/step - loss: 0.1790 - accuracy: 0.8690\nEpoch 85/120\n2/2 [==============================] - 1s 203ms/step - loss: 0.1818 - accuracy: 0.8698\nEpoch 86/120\n2/2 [==============================] - 1s 202ms/step - loss: 0.1804 - accuracy: 0.8723\nEpoch 87/120\n2/2 [==============================] - 1s 197ms/step - loss: 0.1861 - accuracy: 0.8734\nEpoch 88/120\n2/2 [==============================] - 1s 195ms/step - loss: 0.1781 - accuracy: 0.8745\nEpoch 89/120\n2/2 [==============================] - 1s 202ms/step - loss: 0.2246 - accuracy: 0.8443\nEpoch 90/120\n2/2 [==============================] - 1s 199ms/step - loss: 0.1855 - accuracy: 0.8712\nEpoch 91/120\n2/2 [==============================] - 1s 199ms/step - loss: 0.1910 - accuracy: 0.8681\nEpoch 92/120\n2/2 [==============================] - 1s 198ms/step - loss: 0.1937 - accuracy: 0.8653\nEpoch 93/120\n2/2 [==============================] - 1s 207ms/step - loss: 0.1793 - accuracy: 0.8737\nEpoch 94/120\n2/2 [==============================] - 1s 206ms/step - loss: 0.1703 - accuracy: 0.8809\nEpoch 95/120\n2/2 [==============================] - 1s 194ms/step - loss: 0.1686 - accuracy: 0.8826\nEpoch 96/120\n2/2 [==============================] - 1s 199ms/step - loss: 0.1560 - accuracy: 0.8894\nEpoch 97/120\n2/2 [==============================] - 1s 202ms/step - loss: 0.1622 - accuracy: 0.8915\nEpoch 98/120\n2/2 [==============================] - 1s 199ms/step - loss: 0.1549 - accuracy: 0.8939\nEpoch 99/120\n2/2 [==============================] - 1s 202ms/step - loss: 0.1457 - accuracy: 0.9013\nEpoch 100/120\n2/2 [==============================] - 1s 198ms/step - loss: 0.1521 - accuracy: 0.8964\nEpoch 101/120\n2/2 [==============================] - 1s 200ms/step - loss: 0.1424 - accuracy: 0.9038\nEpoch 102/120\n2/2 [==============================] - 1s 199ms/step - loss: 0.1362 - accuracy: 0.9075\nEpoch 103/120\n2/2 [==============================] - 1s 192ms/step - loss: 0.1439 - accuracy: 0.9069\nEpoch 104/120\n2/2 [==============================] - 1s 198ms/step - loss: 0.1440 - accuracy: 0.9032\nEpoch 105/120\n2/2 [==============================] - 1s 196ms/step - loss: 0.1379 - accuracy: 0.9096\nEpoch 106/120\n2/2 [==============================] - 1s 201ms/step - loss: 0.1321 - accuracy: 0.9140\nEpoch 107/120\n2/2 [==============================] - 1s 199ms/step - loss: 0.1226 - accuracy: 0.9186\nEpoch 108/120\n2/2 [==============================] - 1s 195ms/step - loss: 0.1280 - accuracy: 0.9178\nEpoch 109/120\n2/2 [==============================] - 1s 203ms/step - loss: 0.1268 - accuracy: 0.9144\nEpoch 110/120\n2/2 [==============================] - 1s 196ms/step - loss: 0.1155 - accuracy: 0.9254\nEpoch 111/120\n2/2 [==============================] - 1s 198ms/step - loss: 0.1110 - accuracy: 0.9287\nEpoch 112/120\n2/2 [==============================] - 1s 196ms/step - loss: 0.1040 - accuracy: 0.9345\nEpoch 113/120\n2/2 [==============================] - 1s 196ms/step - loss: 0.0988 - accuracy: 0.9376\nEpoch 114/120\n2/2 [==============================] - 1s 198ms/step - loss: 0.1215 - accuracy: 0.9236\nEpoch 115/120\n2/2 [==============================] - 1s 197ms/step - loss: 0.1162 - accuracy: 0.9247\nEpoch 116/120\n2/2 [==============================] - 1s 201ms/step - loss: 0.1110 - accuracy: 0.9320\nEpoch 117/120\n2/2 
[==============================] - 1s 197ms/step - loss: 0.1082 - accuracy: 0.9305\nEpoch 118/120\n2/2 [==============================] - 1s 197ms/step - loss: 0.1067 - accuracy: 0.9330\nEpoch 119/120\n2/2 [==============================] - 1s 199ms/step - loss: 0.1067 - accuracy: 0.9348\nEpoch 120/120\n2/2 [==============================] - 1s 194ms/step - loss: 0.1141 - accuracy: 0.9297\n" ], [ "model.summary()\nmodel.evaluate(Xtrain,Xtest)", "Model: \"model_3\"\n__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_4 (InputLayer) [(None, None, None, 0 \n__________________________________________________________________________________________________\nconv2d_57 (Conv2D) (None, None, None, 3 896 input_4[0][0] \n__________________________________________________________________________________________________\nconv2d_58 (Conv2D) (None, None, None, 3 9248 conv2d_57[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_12 (MaxPooling2D) (None, None, None, 3 0 conv2d_58[0][0] \n__________________________________________________________________________________________________\nconv2d_59 (Conv2D) (None, None, None, 6 18496 max_pooling2d_12[0][0] \n__________________________________________________________________________________________________\nconv2d_60 (Conv2D) (None, None, None, 6 36928 conv2d_59[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_13 (MaxPooling2D) (None, None, None, 6 0 conv2d_60[0][0] \n__________________________________________________________________________________________________\nconv2d_61 (Conv2D) (None, None, None, 1 73856 max_pooling2d_13[0][0] \n__________________________________________________________________________________________________\nconv2d_62 (Conv2D) (None, None, None, 1 147584 conv2d_61[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_14 (MaxPooling2D) (None, None, None, 1 0 conv2d_62[0][0] \n__________________________________________________________________________________________________\nconv2d_63 (Conv2D) (None, None, None, 2 295168 max_pooling2d_14[0][0] \n__________________________________________________________________________________________________\nconv2d_64 (Conv2D) (None, None, None, 2 590080 conv2d_63[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_15 (MaxPooling2D) (None, None, None, 2 0 conv2d_64[0][0] \n__________________________________________________________________________________________________\nconv2d_65 (Conv2D) (None, None, None, 5 1180160 max_pooling2d_15[0][0] \n__________________________________________________________________________________________________\nconv2d_66 (Conv2D) (None, None, None, 5 2359808 conv2d_65[0][0] \n__________________________________________________________________________________________________\nconv2d_transpose_12 (Conv2DTran (None, None, None, 2 524544 conv2d_66[0][0] \n__________________________________________________________________________________________________\nconcatenate_12 (Concatenate) (None, None, None, 5 0 conv2d_transpose_12[0][0] \n conv2d_64[0][0] 
\n__________________________________________________________________________________________________\nconv2d_67 (Conv2D) (None, None, None, 2 1179904 concatenate_12[0][0] \n__________________________________________________________________________________________________\nconv2d_68 (Conv2D) (None, None, None, 2 590080 conv2d_67[0][0] \n__________________________________________________________________________________________________\nconv2d_transpose_13 (Conv2DTran (None, None, None, 1 131200 conv2d_68[0][0] \n__________________________________________________________________________________________________\nconcatenate_13 (Concatenate) (None, None, None, 2 0 conv2d_transpose_13[0][0] \n conv2d_62[0][0] \n__________________________________________________________________________________________________\nconv2d_69 (Conv2D) (None, None, None, 1 295040 concatenate_13[0][0] \n__________________________________________________________________________________________________\nconv2d_70 (Conv2D) (None, None, None, 1 147584 conv2d_69[0][0] \n__________________________________________________________________________________________________\nconv2d_transpose_14 (Conv2DTran (None, None, None, 6 32832 conv2d_70[0][0] \n__________________________________________________________________________________________________\nconcatenate_14 (Concatenate) (None, None, None, 1 0 conv2d_transpose_14[0][0] \n conv2d_60[0][0] \n__________________________________________________________________________________________________\nconv2d_71 (Conv2D) (None, None, None, 6 73792 concatenate_14[0][0] \n__________________________________________________________________________________________________\nconv2d_72 (Conv2D) (None, None, None, 6 36928 conv2d_71[0][0] \n__________________________________________________________________________________________________\nconv2d_transpose_15 (Conv2DTran (None, None, None, 3 8224 conv2d_72[0][0] \n__________________________________________________________________________________________________\nconcatenate_15 (Concatenate) (None, None, None, 6 0 conv2d_transpose_15[0][0] \n conv2d_58[0][0] \n__________________________________________________________________________________________________\nconv2d_73 (Conv2D) (None, None, None, 3 18464 concatenate_15[0][0] \n__________________________________________________________________________________________________\nconv2d_74 (Conv2D) (None, None, None, 3 9248 conv2d_73[0][0] \n__________________________________________________________________________________________________\nconv2d_75 (Conv2D) (None, None, None, 3 99 conv2d_74[0][0] \n==================================================================================================\nTotal params: 7,760,163\nTrainable params: 7,760,163\nNon-trainable params: 0\n__________________________________________________________________________________________________\n2/2 [==============================] - 0s 69ms/step - loss: 0.1158 - accuracy: 0.9263\n" ], [ "plt.figure(figsize=[10,5])\nplt.subplot(121)\nplt.plot(history.history['accuracy'])\nprint(history.history.keys())\nplt.xlabel('Epoch')\nplt.ylabel('Accuracy')\nplt.legend(['Training Accuracy',\n 'Validation Accuracy'])\nplt.title('Accuracy Curves')\n\nplt.subplot(122)\nplt.plot(history.history['loss'])\nplt.xlabel('Epoch')\nplt.ylabel('Loss')\nplt.legend(['Training Loss',\n 'Validation Loss'])\nplt.title('Loss Curves')\nplt.show()\n\n\n", "dict_keys(['loss', 'accuracy'])\n" ], [ "\nmodel.save(\"/content/drive/MyDrive/Colab 
Notebooks/datasets/\"+\"unet.h5\")", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a7b6834464cd9af60b6c7f370a381394f55a141
657,818
ipynb
Jupyter Notebook
wavenet_nb_preproc.ipynb
PengJiwu/Edge-computing-with-tf
68840b3cb9091bc758f5eb6818640959aff17831
[ "MIT" ]
1
2021-11-15T06:38:46.000Z
2021-11-15T06:38:46.000Z
wavenet_nb_preproc.ipynb
PengJiwu/Edge-computing-with-tf
68840b3cb9091bc758f5eb6818640959aff17831
[ "MIT" ]
null
null
null
wavenet_nb_preproc.ipynb
PengJiwu/Edge-computing-with-tf
68840b3cb9091bc758f5eb6818640959aff17831
[ "MIT" ]
1
2021-11-15T06:38:47.000Z
2021-11-15T06:38:47.000Z
643.028348
150,268
0.898286
[ [ [ "# Edge Computing using Tensorflow and Neural Compute Stick", "_____no_output_____" ], [ "## \" Generate piano sounds using EEG capturing rhythmic activity of brain\"", "_____no_output_____" ], [ "### Contents\n\n#### 1. Motivation\n#### 2. Signal acquisition\n#### 3. Signal postprocessing\n#### 4. Synthesize music \n##### 4.1 Training Data\n##### 4.2 Training data preprocessing\n##### 4.3 Neural Network architecture\n##### 4.4 Training methodology\n#### 5. Error definition and Further development\n", "_____no_output_____" ], [ "### 1. Motivation\nThe following work is inspired by EEG. EEG can be described in terms of rhythmic cortical electrical activity of brain triggered by perceived sensory stimuli , where those rythmic activity falls in certain frequency bands(delta to gamma). In sound engineering, signals with dominant frequencies makes a pitch and sequences of pitches creates rhythm. Combining this concepts intuitively shows, by detecting those dominant frequencies, it is possible to listen to our brain using the signals it generates for different stimuli. Using principles of sound synthesis and sampling along with deep neural networks(DNN), in this project, i made an attempt to extract the rhythm or pitch hidding within brain waves and reproduce it as piano music. \n\n### 2. Signal acquisition: (Not available)\n\nEEG/EOG recordings are not available. For the sake of simplicity and bring general working prototype of the model, used some random auto generated signal for test. This is because, the trained DNN is not constrained within brain waves, but to any kind of signal with dominant frequencies. Piano data set available for none commercial use is used during training and evaluation phase.\n\n### 3. Signal Postprocessing (idea proposed)\n\nEnough researches proved, \"brain waves are rhytmic\"[2] and they falls in frequency bandwidth from Delta(<4Hz) to Gamma (>30-100Hz). Human audible frequecy range 20 - 20KHz. Hence, increasing the acquired EEG freuencies by certain constant value and preprocess with sampling rate 44100 makes it resembles piano sounds (fundamental frequency range 27.5 - 4186.01Hz), which itself within human audible range. Later, save the processed brain signals as numpy arrays and convert them as .wav files to reproduce the sound. Using the .wav files to extract brain signal (now sound) informations (frequencies, sampling rate and pitch). In case, we succeed to regenerate the sounds, since we increased the signal frequency by constant (to fit our piano data), the sounds plays faster. Hence we need to reduce the frequency by the same value while replaying the sound that fits the original brain signal.", "_____no_output_____" ], [ "### 4. Synthesize music\n\n#### 4.1 Training data\n\nPiano chords dataset available to public for non commercial purposes \n[3]. Each piano .wav files in the data set are sampled at 44100 and have varying data length. 
", "_____no_output_____" ], [ "### 4. Synthesize music\n\n#### 4.1 Training data\n\nA piano chords dataset, available to the public for non-commercial purposes \n[3], is used. Each piano .wav file in the data set is sampled at 44100 and has a varying data length. The data is analysed and studied in further detail in the code blocks below.\n", "_____no_output_____" ], [ "#### 4.2 Training data preprocessing", "_____no_output_____" ], [ "###### Import required python libraries and add the current working directory to python path and system paths\n\nDirectory structure\n<br>\n<br>\nWavenet/\n\n -/dataset (downloaded piano chords)\n \n - /UMAPiano-DB-Poly-1/UMAPiano-DB-A0-NO-F.wav\n \n -/clipped_data (clipped piano sounds are here)\n \n -/wavenet_logs (tensorflow checkpoints and logs)", "_____no_output_____" ] ], [ [ "%matplotlib inline\nfrom __future__ import division\nimport numpy as np\nimport tensorflow as tf\nimport scipy.io\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport os\nimport sys\nimport random\nimport scipy.io.wavfile\nimport scipy\nmatplotlib.rcParams['figure.figsize'] = (8.0, 6.0)\n\n#-------------------------------------Add working directory to path-----------------------------------------------\n\ncwd = os.getcwd()\nsys.path.append(cwd)\nsys.path.insert(0,'E:/!CogSci/!!!WS2017/Edge_computing/Wavenet') \nsys.path.insert(0,'C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/dataset')\nsys.path.insert(0,'E:/!CogSci/!!!WS2017/Edge_computing/Wavenet/clipped_data')\n# Save the variables in a log directory during training\n\nsave_path = \"C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/wavenet_logs\"\nif not os.path.exists(save_path):\n os.makedirs(save_path)\n\n", "_____no_output_____" ] ], [ [ "Each piano file from the dataset is approximately 1-2 seconds in length. We used scipy to read each music file and get its sampling rate and data as an array, and found that all audio files have a sampling rate of 44100 while the data length varies with the length of the audio. To train the DNN, we need all training data to have the same length, and we increase the sampling rate to prevent signal loss/corruption. The code below acquires the first information about the piano dataset. ", "_____no_output_____" ] ], [ [ "# Location of the wav files in the file system.\n\nfileName1 = 'E:/!CogSci/!!!WS2017/Edge_computing/Wavenet/dataset/UMAPiano-DB-Poly-1/UMAPiano-DB-A0-NO-F.wav'\nfileName2 = 'E:/!CogSci/!!!WS2017/Edge_computing/Wavenet/dataset/UMAPiano-DB-Poly-1/UMAPiano-DB-A0-NO-M.wav'\n\n# Loads sample rate (bps) and signal data (wav). \n\nsample_rate1, data1 = scipy.io.wavfile.read(fileName1)\nsample_rate2, data2 = scipy.io.wavfile.read(fileName2)\n\n\n# Print to stdout the sample rate, number of items and duration in seconds of each wav file\nprint(\"Sample rate1 %s data size1 %s duration1: %s seconds\"%(sample_rate1,data1.shape,len(data1)/sample_rate1))\nprint(\"Sample rate2 %s data size2 %s duration2: %s seconds\"%(sample_rate2,data2.shape,len(data2)/sample_rate2))\nprint(\"DATA SIZES ARE DIFFERENT NEEDS TO BE CONSIDERED\")\n\n# Plot the wave files to get insight about the samples\n\nplt.plot(data1)\nplt.plot(data2)\nplt.show()\n\n\n", "Sample rate1 44100 data size1 (96271,) duration1: 2.183015873015873 seconds\nSample rate2 44100 data size2 (95642,) duration2: 2.1687528344671203 seconds\nDATA SIZES ARE DIFFERENT NEEDS TO BE CONSIDERED\n" ] ], [ [ "
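The outputs above confirm the length mismatch. For later reference, the fix is simple zero-padding of every clip up to the longest one -- a minimal sketch of the idea (the helper name pad_to is ours; the actual alignment happens later while feeding the DNN):\n\n```\ndef pad_to(data, max_len):\n padded = np.zeros(max_len, dtype=data.dtype)\n padded[:len(data)] = data\n return padded\n```\n\n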
Later, I checked the files with the shortest and longest lengths to fix the varying-length problem in the code block below.", "_____no_output_____" ] ], [ [ "\"\"\"\ndataset_path = 'E:/!CogSci/!!!WS2017/Edge_computing/Wavenet/dataset/UMAPiano-DB-Poly-1'\ndir_list_len = len(os.listdir(dataset_path))\nprint(\"Number of files in the Dataset \",dir_list_len)\n\n# Change file names to be easily recognized\n\ndef change_filenames(dataset_path):\n \n i = 0 # Counter and target filename\n \n for old_name in os.listdir(dataset_path):\n # os.rename(dataset_path + \"/\" + old_name, dataset_path + \"/\" + str(i) + '.wav')\n os.rename(os.path.join(dataset_path, old_name), os.path.join(dataset_path, str(i) + '.wav'))\n i+=1 \n\nchange_filenames(dataset_new)\nlist_sizes_new =[] \n\nfor data_new in os.listdir(dataset_new):\n _,data_new = scipy.io.wavfile.read(dataset_new+'/'+data_new)\n list_sizes_new.append(data_new.shape[0])\nprint(\"Maximum size %s and the music file is\",np.argmax(list_sizes_new))\n\n\"\"\"\n\n\ndataset_new = 'C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/dataset'\nlist_sizes =[] \nfor datas in os.listdir(dataset_new):\n _,data_new = scipy.io.wavfile.read(os.path.join(dataset_new,datas))\n list_sizes.append(data_new.shape[0])\n if data_new.shape[0]== 39224:\n print(\"Minimum sized file is\",datas)\n if data_new.shape[0] == 181718:\n print(\"Max sized file is\",datas)\n\nprint(\"Maximum size %s \"%(max(list_sizes)))\nprint(\"Minimum size %s \"%(min(list_sizes)))\n\nprint(\"Dataset is in C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/dataset and all the files are numbered\")\n\n# -------------------------Get some insights and information about the max and min sized data-----------------------------\n\n# Location of the wav file in the file system.\nfileName3 = 'C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/dataset/356.wav'\nfileName4 = 'C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/dataset/722.wav'\n# Loads sample rate (bps) and signal data (wav). \nsample_rate3, data3 = scipy.io.wavfile.read(fileName3)\nsample_rate4, data4 = scipy.io.wavfile.read(fileName4)\n\n\n# Print in stdout the sample rate, number of items and duration in seconds of the wav file\nprint(\"Sample rate3 %s data size3 %s duration3: %s seconds\"%(sample_rate3,data3.shape,len(data3)/sample_rate3))\nprint(\"Sample rate4 %s data size4 %s duration4: %s seconds\"%(sample_rate4,data4.shape,len(data4)/sample_rate4))\nprint(\"Data sizes are different\")\n\n# Plot the wave file and get insight about the sample. Here we test first 100 samples of the wav file\n\nplt.plot(data4)\nplt.show()\n\nprint(\"Safe to clip first 10000 sample points out from the array and convert them back to .wav file\")", "Max sized file is 356.wav\nMinimum sized file is 722.wav\nMaximum size 181718 \nMinimum size 39224 \nDataset is in C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/dataset and all the files are numbered\nSample rate3 44100 data size3 (181718,) duration3: 4.120589569160998 seconds\nSample rate4 44100 data size4 (39224,) duration4: 0.8894331065759638 seconds\nData sizes are different\n0\n0\n0\n0\n" ] ], [ [ "As we can see, even the smallest piano file has about 20k zero values at the head and tail combined. Hence it is safe to clip the first and last 10k indices from all files and save them back to .wav files. We can also add a small amount of noise to the training data at this step using the code below. 
We will discuss the reason briefly later.", "_____no_output_____" ] ], [ [ "#----------------------- .WAV training data preprocessing steps ----------------------\nimport IPython\n\n# Clip the first and last 10000 values, which don't show any information \n\"\"\"\ndef clip_write_wav(dataset_path):\n \n i = 0 # Counter and target filename\n \n for datas in os.listdir(dataset_path):\n \n _,data = scipy.io.wavfile.read(dataset_path+'/'+datas)\n data= data[:-10000] # Slice out last 10000 elements in data\n data= data[10000:] # Slice out first 10000 elements in the data\n \n #IF ADD NOISE DO it here in the data which is an array.\n \n scipy.io.wavfile.write('C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/clipped_data/%i.wav'%i, 44100, data) \n \n i+=1 \n \n \"\"\"\n\n_dataset = 'C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/dataset'\n_target = 'C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/clipped_data'\nclip_points = 10000\n_sampling_rate = 44100\n\n# clip_write_wav(_dataset) # Uncomment this line to clip and write the wav files again\n\n\n# Verify the required information again\nsample_rate3, data3 = scipy.io.wavfile.read('C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/clipped_data/3.wav')\nprint(\"Sample rate %s data size %s duration: %s seconds\"%(sample_rate3,data3.shape,len(data3)/sample_rate3))\nplt.plot(data3)\nplt.show()\n\n#Play the audio inline\n\nIPython.display.Audio('C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/clipped_data/3.wav')\n\n", "Sample rate 44100 data size (56293,) duration: 1.2764852607709751 seconds\n" ] ], [ [ "The data are clipped and now have a shorter head and tail. Next we will increase the sampling rate (using the \"write_wav\" function below) and fix the varying lengths by choosing the longest file as a reference and zero-padding the other data until their lengths match it; this is done while feeding the DNN, using the \"get_training_data\" function below.\n<br>\nBut the scipy read module doesn't preserve the indices of the files in the dataset, as we can see from the fact that the largest and smallest file names in the code blocks above and below are different. 
So, I hard-coded the sizes of the smallest and largest files and searched for the corresponding files.\n\n", "_____no_output_____" ] ], [ [ "# ------------- Search for the largest and smallest files --------------\n\n_dataset_new = 'C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/clipped_data'\n_list_sizes =[] \n\nfor datas in os.listdir(_dataset_new):\n _,_data_new = scipy.io.wavfile.read(os.path.join(_dataset_new,datas))\n _list_sizes.append(_data_new.shape[0])\n if _data_new.shape[0]== 19224:\n print(\"Minimum sized file is\",datas)\n if _data_new.shape[0] == 161718:\n print(\"Max sized file is\",datas)\n\nprint(\"Maximum size %s \"%(max(_list_sizes)))\nprint(\"Minimum size %s \"%(min(_list_sizes)))\nprint(\"Notice that io read and write doesn't preserve the index of files in the directory\")", "Max sized file is 286.wav\nMinimum sized file is 693.wav\nMaximum size 161718 \nMinimum size 19224 \nNotice that io read and write doesn't preserve the index of files in the directory\n" ], [ "# ------------------------ Upsample the data -----------------------------\n\"\"\"\ndef write_wav(dataset_path):\n i=0\n for datas in os.listdir(dataset_path):\n \n _,data = scipy.io.wavfile.read(dataset_path+'/'+datas)\n \n #IF ADD NOISE DO it here in the data which is an array.\n \n scipy.io.wavfile.write('C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/upsampled_data/%i.wav'%i, 88000, data) \n \n i+=1\n\nwrite_wav(_dataset_new)\n\"\"\"", "_____no_output_____" ], [ "# ----------------- Verifying data integrity again -----------------------\n\nsampled_datapath ='C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/upsampled_data'\n_list_sizes =[] \n\nfor datas in os.listdir(sampled_datapath):\n sampling_rate,_data_new = scipy.io.wavfile.read(os.path.join(sampled_datapath,datas))\n _list_sizes.append(_data_new.shape[0])\n \n if _data_new.shape[0]== 19224:\n print(\"Minimum sized file is %s and sampling rate\"%datas,sampling_rate)\n \n elif _data_new.shape[0] == 161718:\n print(\"Max sized file is %s and sampling rate\"%datas,sampling_rate)\n\n\nprint(\"Maximum size %s \"%(max(_list_sizes)))\nprint(\"Minimum size %s \"%(min(_list_sizes)))\n\n\n# Verify the required information again\n\nsample_rate5, data5 = scipy.io.wavfile.read('C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/upsampled_data/3.wav')\nprint(\"Sample rate %s data size %s duration: %s seconds\"%(sample_rate5,data5.shape,len(data5)/sample_rate5))\nplt.plot(data5)\nplt.show()\n\n#Play the audio inline\n\nIPython.display.Audio('C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/clipped_data/3.wav')", "Max sized file is 208.wav and sampling rate 88000\nMinimum sized file is 660.wav and sampling rate 88000\nMaximum size 161718 \nMinimum size 19224 \nSample rate 88000 data size (49966,) duration: 0.5677954545454545 seconds\n" ] ], [ [ "Since we use stacks of CNNs in the encoder, I decided to convert the data into matrices of size 512*512, for which \nwe need each file to have 262144 entries. So, instead of using the largest file as a reference, I chose 262144 as the length limit for all files. The function \"get_training_data\" serves this purpose for us.", "_____no_output_____" ] ], [ [ "\n# Each audio file should have 262144 entries. 
Extend them all with zeros in the tail\n# Convert all audio files as matrices of 512x512 shape\n\ndef get_training_data(dataset_path):\n \n training_data = []\n \n for datas in os.listdir(dataset_path):\n \n _,data = scipy.io.wavfile.read(dataset_path+'/'+datas)\n # Add Zeros at the tail until 262144\n temp_zeros = [0]*262144\n\n temp_zeros[:len(data)] = data # Slice temp_zeros and add the data into the slice\n\n # Reshape the data as square matrix of 512*512 of size 262144\n data_ = np.reshape(temp_zeros,(512,512))\n training_data.append(data_)\n \n return training_data\n\ntraining_data = get_training_data('C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/upsampled_data')\n\nprint(training_data[0].shape)\n\n# Expand the dims # The third dimension represents number of channels\n\nfor i in range(len(training_data)):\n training_data[i] = training_data[i][:,:,np.newaxis]\nprint(training_data[0].shape)\n \n", "(512, 512)\n(512, 512, 1)\n" ] ], [ [ "The training data is ready to be fed into the network. But we still require the pitch info for each training sample, since the network architecture we use requires it during training. The class \"HarmonicPowerSpectrum\" and the next two code blocks bandpass-filter the signal, which eases pitch detection.\n ", "_____no_output_____" ] ], [ [ "# Get pitch of corresponding data\n\n\"\"\"\nSteps to extract the pitches of input signal:\nReference:\nhttps://stackoverflow.com/questions/43946112/slicing-audio-signal-to-detect-pitch\n\n1. Detect the fundamental frequencies \"f0 estimation\" (For piano, lowest freq - 27.5 and highest - 4186.01 Hz)\n2. Get rid of garbage transients and low frequency noise using bandpass filter \n3. After filtering do the peak detection using fft to find the pitches \n\"\"\"\n\n# 1. Fundamental frequencies [27.5,4186.01] Hz\n# 2. Build bandpass filter\n\nfrom scipy.signal import butter, lfilter\n\ndef butter_bandpass(f0, fs, order):\n \n \"\"\"Given the sampling freq (fs) and bandpass window (f0) of the filter, build the bandpass filter\"\"\"\n \n nyq = 0.5 * fs\n low = f0[0] / nyq\n high = f0[1] / nyq\n b, a = butter(order, [low, high], btype='band') # Numerator (b) and denominator (a) polynomials of the IIR filter\n \n return b, a\n\ndef butter_bandpass_filter(sig, f0, fs, order):\n \n \"\"\" Apply bandpass filter to the given signal\"\"\"\n \n b, a = butter_bandpass(f0, fs,order)\n y = lfilter(b, a, sig) # Apply the filter to the signal\n \n \n return y \n \n# Verify filter signal \nsig = data5\nf0= (27.5, 4186.01) # Fundamental freq of piano\nfs = sample_rate5 # sampling rate of .wav files in the preprocessed training dataset\norder = 1\nb, a = butter_bandpass(f0, fs, order=1) # Numerator (b) and denominator (a) polynomials of the IIR filter\nfiltered_sig= butter_bandpass_filter(sig, f0,fs,order=1)\n\n# Plot some range of samples from both raw signal and bandpass filtered signal.\n\nplt.plot(sig[10000:10500], label='training signal')\nplt.plot(filtered_sig[10000:10500], label='Bandpass filtered signal with order %d'% order)\nplt.legend(loc='upper left')\n\n# orders = [1,2,3,4,5]\n# for order in orders:\n# filtered_sig= butter_bandpass_filter(sig, f0,fs,order) # Bandpass filtered signal\n# plt.plot(data5[10000:10500], label='training signal')\n# plt.plot(filtered_sig[10000:10500], label='Bandpass filtered signal with order %d'% order)\n# plt.legend(loc='upper left')\n\nprint(\"Bandpass filter with order 1 looks okay. 
We do not want to lose much information in the data by filtering it with higher orders\")\n\n", "Bandpass filter with order 1 looks okay. We do not want to lose much information in the data by filtering it with higher orders\n" ] ], [ [ "# Reference :https://github.com/pydanny/pydanny-event-notes/blob/master/Pycon2008/intro_to_numpy/files/pycon_demos/windowed_fft/short_time_fft_solution.py\n# Get frequency components of the data using short-time Fourier transform\n\nfrom scipy.fftpack import fft, fftfreq, fftshift\nfrom scipy.signal import get_window\nfrom math import ceil\nfrom pylab import figure, imshow, clf, gray, xlabel, ylabel\n\n\nsig = data5\nf0= (27.5, 4186.01) # Fundamental freq of piano\nfs = sample_rate5 # sampling rate of .wav files in the preprocessed training dataset\n\n\ndef freq_comp(signal,sample_rate):\n \n # Define the sample spacing and window size.\n dT = 1.0/sample_rate\n T_window = 50e-3 # 50ms ; window time frame\n N_window = int(T_window * sample_rate) # 440 \n N_data = len(signal) \n\n # 1. Get the window profile\n window = get_window('hamming', N_window) # Multiply the segments of data using hamming window func\n\n # 2. Set up the FFT\n result = []\n start = 0\n while (start < N_data - N_window):\n end = start + N_window\n result.append(fftshift(fft(window*signal[start:end])))\n start = end\n\n result.append(fftshift(fft(window*signal[-N_window:])))\n result = np.array(result,result[0].dtype)\n \n return result\n\nfreq_comp_unfiltered = freq_comp(sig,fs)\nfreq_comp_filtered = freq_comp(filtered_sig,fs)\nplt.figure(1)\nplt.plot(freq_comp_unfiltered)\nplt.title(\"Unfiltered Frequency components of the training signal\")\nplt.show()\nplt.figure(2)\nplt.plot(freq_comp_filtered)\nplt.title(\"Filtered frequency component of the training signal\")\nplt.show()\n\n\n# # Display results\n# freqscale = fftshift(fftfreq(N_window,dT))[150:-150]/1e3\n# figure(1)\n# clf()\n# imshow(abs(result[:,150:-150]),extent=(freqscale[-1],freqscale[0],(N_data*dT-T_window/2.0),T_window/2.0))\n# xlabel('Frequency (kHz)')\n# ylabel('Time (sec.)')\n# gray()\n", "c:\\users\\saran\\anaconda3\\envs\\tensorflow\\lib\\site-packages\\numpy\\core\\numeric.py:492: ComplexWarning: Casting complex values to real discards the imaginary part\n return array(a, dtype, copy=False, order=order)\n" ], [ "# Reference: http://musicweb.ucsd.edu/~trsmyth/analysis/Harmonic_Product_Spectrum.html\n# Get the fundamental frequency(peak frequency) of the training data\n\nimport parabolic\nfrom pylab import subplot, plot, log, copy, show\n\n# def hps(sig,fs,maxharms):\n \n# \"\"\"\n# Estimate peak frequency using harmonic product spectrum (HPS)\n# \"\"\"\n# window = sig * scipy.signal.blackmanharris(len(sig))\n\n# # Harmonic product spectrum: Measures the maximum coincidence for harmonics for each spectral frame\n \n# c = abs(np.fft.rfft(window)) # Compute the one-dimensional discrete Fourier Transform for real input.\n# plt.plot(c)\n# plt.title(\"Discrete fourier transform of signal\")\n# plt.figure()\n# pitch = np.log(c)\n# plt.plot(pitch)\n# plt.title(\"Max Harmonics for the range same as fundamental frequencies\")\n \n# # Search for a maximum value of a range of possible fundamental frequencies\n# # for x in range(2, maxharms):\n# # a = copy(c[::x]) # Should average or maximum instead of decimating\n# # c = c[:len(a)]\n# # i = np.argmax(abs(c))\n# # c *= a\n# # plt.title(\"Max Harmonics for the range of %d times the fundamental frequencies\"%x)\n# # plt.plot(maxharms, x)\n# # plt.plot(np.log(c))\n# # show()\n\n# 
hps(butter_bandpass_filter(sig,f0, fs,order = 1),fs,maxharms=0)\n# print(\" As usual we opt to choose the same range as fundamental frequencies to make sure we don't lose much information\")", "_____no_output_____" ], [ "# Wrap them all in one class HarmonicPowerSpectrum\n\nclass HarmonicPowerSpectrum(object):\n \n def __init__(self,sig,f0,fs,order,maxharms):\n self.sig = sig\n self.f0 = f0\n self.fs = fs\n self.order = order\n self.maxharms = maxharms\n\n @property \n def butter_bandpass(self):\n \n \"\"\"Given the sampling freq (fs) and bandpass window (f0) of the filter, build the bandpass filter\"\"\"\n \n nyq = 0.5 * self.fs # Nyquist frequency\n low = self.f0[0] / nyq\n high = self.f0[1] / nyq\n b, a = butter(self.order, [low, high], btype='band') # Numerator (b) and denominator (a) polynomials of the IIR filter\n\n return b, a\n\n @property\n def butter_bandpass_filter(self):\n \n \"\"\" Apply bandpass filter to the given signal\"\"\"\n \n b, a = self.butter_bandpass\n y = lfilter(b, a, self.sig) # Apply the filter to the signal\n\n return y \n\n @property\n def hps(self):\n \n \"\"\"Estimate peak frequency using harmonic product spectrum (HPS)\"\"\"\n \n y = self.butter_bandpass_filter\n window = y * scipy.signal.blackmanharris(len(y)) #Create window to search harmonics in signal slices\n\n # Harmonic product spectrum: Measures the maximum coincidence for harmonics for each spectral frame\n \n c = abs(np.fft.rfft(window)) # Compute the one-dimensional discrete Fourier Transform for real input.\n z = np.log(c) # Fundamental frequency or pitch of the given signal \n\n return z\n \n \nz = HarmonicPowerSpectrum(sig, f0, fs, order = 1,maxharms=0)\nharm_pow_spec = z.hps\nplt.figure(1)\nplt.plot(harm_pow_spec)\nplt.title(\"Max Harmonics for the range same as fundamental frequencies Bp filtered in Order 0 and max harmonic spectrum 0\")\nfreq_comp_hps = freq_comp(harm_pow_spec,fs)\nplt.figure(2)\nplt.plot(freq_comp_hps)\nplt.title(\"\"\"Frequency components(in logarithmic scale) of harmonic spectrum of filtered training data. \n A harmonic set of two pitches contributing significantly to this piano chord\"\"\")\nplt.show()\n\n", "c:\\users\\saran\\anaconda3\\envs\\tensorflow\\lib\\site-packages\\numpy\\core\\numeric.py:492: ComplexWarning: Casting complex values to real discards the imaginary part\n return array(a, dtype, copy=False, order=order)\n" ] ], [ [ "Hence, I updated the get_training_data function to perform pitch detection using the HarmonicPowerSpectrum analyser \nas seen below.", "_____no_output_____" ] ], [ [ "# Each audio file should have 262144 entries. 
Extend them all with zeros in the tail\n# Convert all audio files as matrices of 512x512 shape\n\ndef get_training_data(dataset_path, f0, fs, order = 1,maxharms=0):\n \n training_data = []\n pitch_data = []\n \n for datas in os.listdir(dataset_path):\n \n _,data = scipy.io.wavfile.read(dataset_path+'/'+datas)\n # Add Zeros at the tail until 262144\n temp_zeros_data = [0]*262144\n # print(\"Unpadded data len\",len(data))\n # print(len(temp_zeros))\n temp_zeros_data[:len(data)] = data # Slice temp_zeros and add the data into the slice\n # print(\"Padded data len\",len(temp_zeros))\n # print(np.shape(temp_zeros))\n # Reshape the data as square matrix of 512*512 of size 262144\n data_ = np.reshape(temp_zeros_data,(512,512))\n # Get pitch of the signal\n z = HarmonicPowerSpectrum(temp_zeros_data, f0, fs, order = 1,maxharms=0)\n harm_pow_spec = z.hps\n \n training_data.append(data_)\n pitch_data.append(harm_pow_spec)\n \n return training_data,pitch_data\n\ntraining_data,pitch_data = get_training_data('C:/Users/Saran/!!!!!!!!!!!!!Edge_computing/Wavenet/upsampled_data',f0, fs, order = 1,maxharms=0)\n\n\nprint(training_data[0].shape)\n\n# Expand the dims # The third dimension represents number of channels\n\nfor i in range(len(training_data)):\n training_data[i] = training_data[i][:,:,np.newaxis]\nprint(training_data[0].shape)", "(512, 512)\n(512, 512, 1)\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
4a7b7252b4aaec868fb7cee21353813060c468d1
6,601
ipynb
Jupyter Notebook
docs/notebooks/customize.ipynb
aaronspring/xclim
393797308a581428c229920a3b0933c8243cd03a
[ "Apache-2.0" ]
169
2018-09-14T21:14:40.000Z
2022-03-29T14:27:06.000Z
docs/notebooks/customize.ipynb
aaronspring/xclim
393797308a581428c229920a3b0933c8243cd03a
[ "Apache-2.0" ]
950
2018-08-23T16:13:17.000Z
2022-03-28T19:30:59.000Z
docs/notebooks/customize.ipynb
aaronspring/xclim
393797308a581428c229920a3b0933c8243cd03a
[ "Apache-2.0" ]
35
2018-12-04T21:12:10.000Z
2022-03-04T20:38:07.000Z
32.043689
371
0.608393
[ [ [ "# Customizing and controlling xclim\n\nxclim's behaviour can be controlled globally or contextually through `xclim.set_options`, which acts the same way as `xarray.set_options`. For the extension of xclim with the addition of indicators, see the [Extending xclim](extendxclim.ipynb) notebook.", "_____no_output_____" ] ], [ [ "import xarray as xr\nimport xclim\nfrom xclim.testing import open_dataset", "_____no_output_____" ] ], [ [ "Let's create fake data with some missing values and mask every 10th, 20th and 30th of the month.This represents 9.6-10% of masked data for all months except February where it is 7.1%.", "_____no_output_____" ] ], [ [ "tasmax = (\n xr.tutorial.open_dataset(\"air_temperature\")\n .air.resample(time=\"D\")\n .max(keep_attrs=True)\n)\ntasmax = tasmax.where(tasmax.time.dt.day % 10 != 0)", "_____no_output_____" ] ], [ [ "## Checks\nAbove, we created fake temperature data from a xarray tutorial dataset that doesn't have all the standard CF attributes. By default, when triggering a computation with an Indicator from xclim, warnings will be raised:", "_____no_output_____" ] ], [ [ "tx_mean = xclim.atmos.tx_mean(tasmax=tasmax, freq=\"MS\") # compute monthly max tasmax", "_____no_output_____" ] ], [ [ "Setting `cf_compliance` to `'log'` mutes those warnings and sends them to the log instead.", "_____no_output_____" ] ], [ [ "xclim.set_options(cf_compliance=\"log\")\n\ntx_mean = xclim.atmos.tx_mean(tasmax=tasmax, freq=\"MS\") # compute monthly max tasmax", "_____no_output_____" ] ], [ [ "## Missing values\n\nFor example, one can globally change the missing method.\n\nChange the default missing method to \"pct\" and set its tolerance to 8%:", "_____no_output_____" ] ], [ [ "xclim.set_options(check_missing=\"pct\", missing_options={\"pct\": {\"tolerance\": 0.08}})\n\ntx_mean = xclim.atmos.tx_mean(tasmax=tasmax, freq=\"MS\") # compute monthly max tasmax\ntx_mean.sel(time=\"2013\", lat=75, lon=200)", "_____no_output_____" ] ], [ [ "Only February has non-masked data. Let's say we want to use the \"wmo\" method (and its default options), but only once, we can do:", "_____no_output_____" ] ], [ [ "with xclim.set_options(check_missing=\"wmo\"):\n tx_mean = xclim.atmos.tx_mean(\n tasmax=tasmax, freq=\"MS\"\n ) # compute monthly max tasmax\ntx_mean.sel(time=\"2013\", lat=75, lon=200)", "_____no_output_____" ] ], [ [ "This method checks that there is less than `nm=5` invalid values in a month and that there are no consecutive runs of `nc>=4` invalid values. Thus, every month is now valid.\n\nFinally, it is possible for advanced users to register their own method. Xclim's missing methods are in fact based on class instances. Thus, to create a custom missing class, one should implement a subclass based on `xclim.core.checks.MissingBase` and overriding at least the `is_missing` method. The method should take a `null` argument and a `count` argument.\n\n- `null` is a `DataArrayResample` instance of the resampled mask of invalid values in the input dataarray.\n- `count` is the number of days in each resampled periods and any number of other keyword arguments. \n\nThe `is_missing` method should return a boolean mask, at the same frequency as the indicator output (same as `count`), where True values are for elements that are considered missing and masked on the output.\n\nWhen registering the class with the `xclim.core.checks.register_missing_method` decorator, the keyword arguments will be registered as options for the missing method. 
One can also implement a `validate` static method that receives only those options and returns whether they should be considered valid or not.", "_____no_output_____" ] ], [ [ "from xclim.core.missing import register_missing_method\nfrom xclim.core.missing import MissingBase\nfrom xclim.indices.run_length import longest_run\n\n\n@register_missing_method(\"consecutive\")\nclass MissingConsecutive(MissingBase):\n \"\"\"Any period with `max_n` or more consecutive missing values is considered invalid\"\"\"\n\n def is_missing(self, null, count, max_n=5):\n return null.map(longest_run, dim=\"time\") >= max_n\n\n @staticmethod\n def validate(max_n):\n return max_n > 0", "_____no_output_____" ] ], [ [ "The new method is now accessible and usable with:", "_____no_output_____" ] ], [ [ "with xclim.set_options(\n check_missing=\"consecutive\", missing_options={\"consecutive\": {\"max_n\": 2}}\n):\n tx_mean = xclim.atmos.tx_mean(\n tasmax=tasmax, freq=\"MS\"\n ) # compute monthly max tasmax\ntx_mean.sel(time=\"2013\", lat=75, lon=200)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a7b7d2022e368917832e14c116fe039c4520f8b
123,596
ipynb
Jupyter Notebook
student-admissions-keras/StudentAdmissionsKeras.ipynb
govinsprabhu/udacity-dlnd
24781b80894cc37b8b5759e604c7edaf7fb2fbfc
[ "MIT" ]
null
null
null
student-admissions-keras/StudentAdmissionsKeras.ipynb
govinsprabhu/udacity-dlnd
24781b80894cc37b8b5759e604c7edaf7fb2fbfc
[ "MIT" ]
null
null
null
student-admissions-keras/StudentAdmissionsKeras.ipynb
govinsprabhu/udacity-dlnd
24781b80894cc37b8b5759e604c7edaf7fb2fbfc
[ "MIT" ]
null
null
null
134.197611
28,116
0.8312
[ [ [ "# Predicting Student Admissions with Neural Networks in Keras\nIn this notebook, we predict student admissions to graduate school at UCLA based on three pieces of data:\n- GRE Scores (Test)\n- GPA Scores (Grades)\n- Class rank (1-4)\n\nThe dataset originally came from here: http://www.ats.ucla.edu/\n\n## Loading the data\nTo load the data and format it nicely, we will use two very useful packages called Pandas and Numpy. You can read on the documentation here:\n- https://pandas.pydata.org/pandas-docs/stable/\n- https://docs.scipy.org/", "_____no_output_____" ] ], [ [ "# Importing pandas and numpy\n%matplotlib inline\nimport pandas as pd\nimport numpy as np\n\n# Reading the csv file into a pandas DataFrame\ndata = pd.read_csv('student_data.csv')\n\n# Printing out the first 10 rows of our data\ndata[:10]", "_____no_output_____" ] ], [ [ "## Plotting the data\n\nFirst let's make a plot of our data to see how it looks. In order to have a 2D plot, let's ingore the rank.", "_____no_output_____" ] ], [ [ "# Importing matplotlib\nimport matplotlib.pyplot as plt\n\n# Function to help us plot\ndef plot_points(data):\n X = np.array(data[[\"gre\",\"gpa\"]])\n y = np.array(data[\"admit\"])\n admitted = X[np.argwhere(y==1)]\n rejected = X[np.argwhere(y==0)]\n plt.scatter([s[0][0] for s in rejected], [s[0][1] for s in rejected], s = 25, color = 'red', edgecolor = 'k')\n plt.scatter([s[0][0] for s in admitted], [s[0][1] for s in admitted], s = 25, color = 'cyan', edgecolor = 'k')\n plt.xlabel('Test (GRE)')\n plt.ylabel('Grades (GPA)')\n \n# Plotting the points\nplot_points(data)\nplt.show()", "_____no_output_____" ] ], [ [ "Roughly, it looks like the students with high scores in the grades and test passed, while the ones with low scores didn't, but the data is not as nicely separable as we hoped it would. Maybe it would help to take the rank into account? Let's make 4 plots, each one for each rank.", "_____no_output_____" ] ], [ [ "# Separating the ranks\ndata_rank1 = data[data[\"rank\"]==1]\ndata_rank2 = data[data[\"rank\"]==2]\ndata_rank3 = data[data[\"rank\"]==3]\ndata_rank4 = data[data[\"rank\"]==4]\n\n# Plotting the graphs\nplot_points(data_rank1)\nplt.title(\"Rank 1\")\nplt.show()\nplot_points(data_rank2)\nplt.title(\"Rank 2\")\nplt.show()\nplot_points(data_rank3)\nplt.title(\"Rank 3\")\nplt.show()\nplot_points(data_rank4)\nplt.title(\"Rank 4\")\nplt.show()", "_____no_output_____" ] ], [ [ "This looks more promising, as it seems that the lower the rank, the higher the acceptance rate. Let's use the rank as one of our inputs. In order to do this, we should one-hot encode it.\n\n## One-hot encoding the rank\nFor this, we'll use the `get_dummies` function in pandas.", "_____no_output_____" ] ], [ [ "# Make dummy variables for rank\none_hot_data = pd.concat([data, pd.get_dummies(data['rank'], prefix='rank')], axis=1)\n\n# Drop the previous rank column\none_hot_data = one_hot_data.drop('rank', axis=1)\n\n# Print the first 10 rows of our data\none_hot_data[:10]", "_____no_output_____" ] ], [ [ "## Scaling the data\nThe next step is to scale the data. We notice that the range for grades is 1.0-4.0, whereas the range for test scores is roughly 200-800, which is much larger. This means our data is skewed, and that makes it hard for a neural network to handle. 
Let's fit our two features into a range of 0-1, by dividing the grades by 4.0, and the test score by 800.", "_____no_output_____" ] ], [ [ "# Copying our data\nprocessed_data = one_hot_data[:]\n\n# Scaling the columns\nprocessed_data['gre'] = processed_data['gre']/800\nprocessed_data['gpa'] = processed_data['gpa']/4.0\nprocessed_data[:10]", "_____no_output_____" ] ], [ [ "## Splitting the data into Training and Testing", "_____no_output_____" ], [ "In order to test our algorithm, we'll split the data into a Training and a Testing set. The size of the testing set will be 10% of the total data.", "_____no_output_____" ] ], [ [ "sample = np.random.choice(processed_data.index, size=int(len(processed_data)*0.9), replace=False)\ntrain_data, test_data = processed_data.iloc[sample], processed_data.drop(sample)\n\nprint(\"Number of training samples is\", len(train_data))\nprint(\"Number of testing samples is\", len(test_data))\nprint(train_data[:10])\nprint(test_data[:10])", "Number of training samples is 360\nNumber of testing samples is 40\n admit gre gpa rank_1 rank_2 rank_3 rank_4\n81 0 0.775 0.7675 0 1 0 0\n110 0 0.850 0.7700 0 0 0 1\n156 0 0.700 0.6300 0 1 0 0\n261 0 0.550 0.7875 0 1 0 0\n12 1 0.950 1.0000 1 0 0 0\n391 1 0.825 0.9700 0 1 0 0\n169 0 0.750 0.9050 0 0 1 0\n3 1 0.800 0.7975 0 0 0 1\n101 0 0.725 0.8925 0 0 1 0\n35 0 0.500 0.7625 0 1 0 0\n admit gre gpa rank_1 rank_2 rank_3 rank_4\n6 1 0.700 0.7450 1 0 0 0\n9 0 0.875 0.9800 0 1 0 0\n38 1 0.625 0.7825 0 1 0 0\n44 0 0.875 0.7350 0 1 0 0\n46 1 0.725 0.8650 0 1 0 0\n50 0 0.800 0.9650 0 0 1 0\n64 0 0.725 1.0000 0 0 1 0\n67 0 0.775 0.8250 1 0 0 0\n68 0 0.725 0.9225 1 0 0 0\n69 0 1.000 0.9325 1 0 0 0\n" ] ], [ [ "## Splitting the data into features and targets (labels)\nNow, as a final step before the training, we'll split the data into features (X) and targets (y).\n\nAlso, in Keras, we need to one-hot encode the output. 
We'll do this with the `to_categorical` function.", "_____no_output_____" ] ], [ [ "import keras\n\n# Separate data and one-hot encode the output\n# Note: We're also turning the data into numpy arrays, in order to train the model in Keras\nfeatures = np.array(train_data.drop('admit', axis=1))\ntargets = np.array(keras.utils.to_categorical(train_data['admit'], 2))\nfeatures_test = np.array(test_data.drop('admit', axis=1))\ntargets_test = np.array(keras.utils.to_categorical(test_data['admit'], 2))\n\nprint(features[:10])\nprint(targets[:10])", "Using TensorFlow backend.\n" ] ], [ [ "## Defining the model architecture\nHere's where we use Keras to build our neural network.", "_____no_output_____" ] ], [ [ "# Imports\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation\nfrom keras.optimizers import SGD\nfrom keras.utils import np_utils\n\n# Building the model\nmodel = Sequential()\nmodel.add(Dense(128, activation='relu', input_shape=(6,)))\nmodel.add(Dropout(.2))\nmodel.add(Dense(64, activation='relu'))\nmodel.add(Dropout(.1))\nmodel.add(Dense(2, activation='softmax'))\n\n# Compiling the model\nmodel.compile(loss = 'categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\nmodel.summary()", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_1 (Dense) (None, 128) 896 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 128) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 64) 8256 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_3 (Dense) (None, 2) 130 \n=================================================================\nTotal params: 9,282\nTrainable params: 9,282\nNon-trainable params: 0\n_________________________________________________________________\n" ] ], [ [ "## Training the model", "_____no_output_____" ] ], [ [ "# Training the model\nmodel.fit(features, targets, epochs=200, batch_size=100, verbose=0)", "_____no_output_____" ] ], [ [ "## Scoring the model", "_____no_output_____" ] ], [ [ "# Evaluating the model on the training and testing set\nscore = model.evaluate(features, targets)\nprint(\"\\n Training Accuracy:\", score[1])\nscore = model.evaluate(features_test, targets_test)\nprint(\"\\n Testing Accuracy:\", score[1])", " 32/360 [=>............................] - ETA: 0s\n Training Accuracy: 0.722222222222\n32/40 [=======================>......] - ETA: 0s\n Testing Accuracy: 0.575\n" ] ], [ [ "## Challenge: Play with the parameters!\nYou can see that we made several decisions in our training. For instance, the number of layers, the sizes of the layers, the number of epochs, etc.\nIt's your turn to play with parameters! Can you improve the accuracy? The following are other suggestions for these parameters. We'll learn the definitions later in the class:\n- Activation function: relu and sigmoid\n- Loss function: categorical_crossentropy, mean_squared_error\n- Optimizer: rmsprop, adam, ada", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a7b9889e58094fda9668bf473ff2f0dbfa840ca
276,550
ipynb
Jupyter Notebook
Python/Datacamp/1.4_Statistical Thinking in Python-1/15_Datacamp_Statistical-Thinking-in-Python.ipynb
OmarKhanGithub/Data-Science
4ce35723415f7af8c0efbaf5179b5e6de1b6a594
[ "MIT" ]
1
2020-03-17T18:11:11.000Z
2020-03-17T18:11:11.000Z
Python/Datacamp/1.4_Statistical Thinking in Python-1/15_Datacamp_Statistical-Thinking-in-Python.ipynb
OmarKhanGithub/Data-Science
4ce35723415f7af8c0efbaf5179b5e6de1b6a594
[ "MIT" ]
null
null
null
Python/Datacamp/1.4_Statistical Thinking in Python-1/15_Datacamp_Statistical-Thinking-in-Python.ipynb
OmarKhanGithub/Data-Science
4ce35723415f7af8c0efbaf5179b5e6de1b6a594
[ "MIT" ]
null
null
null
221.417134
25,388
0.89847
[ [ [ "# Import plotting modules\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\n\ndf = [4.7, 4.5, 4.9, 4.0, 4.6, 4.5, 4.7, 3.3, 4.6, 3.9, 3.5, 4.2, 4.0, 4.7, 3.6, 4.4, 4.5, 4.1, 4.5, 3.9, 4.8, 4.0, 4.9, 4.7, 4.3, 4.4, 4.8, 5.0, 4.5, 3.5, 3.8, 3.7, 3.9, 5.1, 4.5, 4.5, 4.7, 4.4, 4.1, 4.0, 4.4, 4.6, 4.0, 3.3, 4.2, 4.2, 4.2, 4.3, 3.0, 4.1]\nversicolor_petal_length = np.array(df)\n\n# Set default Seaborn style\nsns.set()\n\n# Plot histogram of versicolor petal lengths\n_ = plt.hist(versicolor_petal_length, ec='white')\n\n# Show histogram\nplt.show()\n", "_____no_output_____" ], [ "# Plot histogram of versicolor petal lengths\n_ = plt.hist(versicolor_petal_length, ec='black')\n\n# Label axes\n_ = plt.xlabel('petal length (cm)')\n_ = plt.ylabel('count')\n\n# Show histogram\nplt.show()\n", "_____no_output_____" ], [ "# Import numpy\nimport numpy as np\n\n# Compute number of data points: n_data\nn_data = len(versicolor_petal_length)\nprint(n_data)\n# Number of bins is the square root of number of data points: n_bins\nn_bins = np.sqrt(n_data)\nprint(n_bins)\n# Convert number of bins to integer: n_bins\nn_bins = int(n_bins)\n\n# Plot the histogram\n_ = plt.hist(versicolor_petal_length, bins=n_bins, ec='black')\n\n# Label axes\n_ = plt.xlabel('petal length (cm)')\n_ = plt.ylabel('count')\n\n# Show histogram\nplt.show()\n", "50\n7.0710678118654755\n" ], [ "\nimport pandas as pd\n\nsepal1 = [5.1, 4.9, 4.7, 4.6, 5.0, 5.4, 4.6, 5.0, 4.4, 4.9, 5.4, 4.8, 4.8, 4.3, 5.8, 5.7, 5.4, 5.1, 5.7, 5.1, 5.4, 5.1, 4.6, 5.1, 4.8, 5.0, 5.0, 5.2, 5.2, 4.7, 4.8, 5.4, 5.2, 5.5, 4.9, 5.0, 5.5, 4.9, 4.4, 5.1, 5.0, 4.5, 4.4, 5.0, 5.1, 4.8, 5.1, 4.6, 5.3, 5.0, 7.0, 6.4, 6.9, 5.5, 6.5, 5.7, 6.3, 4.9, 6.6, 5.2, 5.0, 5.9, 6.0, 6.1, 5.6, 6.7, 5.6, 5.8, 6.2, 5.6, 5.9, 6.1, 6.3, 6.1, 6.4, 6.6, 6.8, 6.7, 6.0, 5.7, 5.5, 5.5, 5.8, 6.0, 5.4, 6.0, 6.7, 6.3, 5.6, 5.5, 5.5, 6.1, 5.8, 5.0, 5.6, 5.7, 5.7, 6.2, 5.1, 5.7, 6.3, 5.8, 7.1, 6.3, 6.5, 7.6, 4.9, 7.3, 6.7, 7.2, 6.5, 6.4, 6.8, 5.7, 5.8, 6.4, 6.5, 7.7, 7.7, 6.0, 6.9, 5.6, 7.7, 6.3, 6.7, 7.2, 6.2, 6.1, 6.4, 7.2, 7.4, 7.9, 6.4, 6.3, 6.1, 7.7, 6.3, 6.4, 6.0, 6.9, 6.7, 6.9, 5.8, 6.8, 6.7, 6.7, 6.3, 6.5, 6.2, 5.9]\nsepal2 = [3.5, 3.0, 3.2, 3.1, 3.6, 3.9, 3.4, 3.4, 2.9, 3.1, 3.7, 3.4, 3.0, 3.0, 4.0, 4.4, 3.9, 3.5, 3.8, 3.8, 3.4, 3.7, 3.6, 3.3, 3.4, 3.0, 3.4, 3.5, 3.4, 3.2, 3.1, 3.4, 4.1, 4.2, 3.1, 3.2, 3.5, 3.1, 3.0, 3.4, 3.5, 2.3, 3.2, 3.5, 3.8, 3.0, 3.8, 3.2, 3.7, 3.3, 3.2, 3.2, 3.1, 2.3, 2.8, 2.8, 3.3, 2.4, 2.9, 2.7, 2.0, 3.0, 2.2, 2.9, 2.9, 3.1, 3.0, 2.7, 2.2, 2.5, 3.2, 2.8, 2.5, 2.8, 2.9, 3.0, 2.8, 3.0, 2.9, 2.6, 2.4, 2.4, 2.7, 2.7, 3.0, 3.4, 3.1, 2.3, 3.0, 2.5, 2.6, 3.0, 2.6, 2.3, 2.7, 3.0, 2.9, 2.9, 2.5, 2.8, 3.3, 2.7, 3.0, 2.9, 3.0, 3.0, 2.5, 2.9, 2.5, 3.6, 3.2, 2.7, 3.0, 2.5, 2.8, 3.2, 3.0, 3.8, 2.6, 2.2, 3.2, 2.8, 2.8, 2.7, 3.3, 3.2, 2.8, 3.0, 2.8, 3.0, 2.8, 3.8, 2.8, 2.8, 2.6, 3.0, 3.4, 3.1, 3.0, 3.1, 3.1, 3.1, 2.7, 3.2, 3.3, 3.0, 2.5, 3.0, 3.4, 3.0]\npetal = [1.4, 1.4, 1.3, 1.5, 1.4, 1.7, 1.4, 1.5, 1.4, 1.5, 1.5, 1.6, 1.4, 1.1, 1.2, 1.5, 1.3, 1.4, 1.7, 1.5, 1.7, 1.5, 1.0, 1.7, 1.9, 1.6, 1.6, 1.5, 1.4, 1.6, 1.6, 1.5, 1.5, 1.4, 1.5, 1.2, 1.3, 1.5, 1.3, 1.5, 1.3, 1.3, 1.3, 1.6, 1.9, 1.4, 1.6, 1.4, 1.5, 1.4, 4.7, 4.5, 4.9, 4.0, 4.6, 4.5, 4.7, 3.3, 4.6, 3.9, 3.5, 4.2, 4.0, 4.7, 3.6, 4.4, 4.5, 4.1, 4.5, 3.9, 4.8, 4.0, 4.9, 4.7, 4.3, 4.4, 4.8, 5.0, 4.5, 3.5, 3.8, 3.7, 3.9, 5.1, 4.5, 4.5, 4.7, 4.4, 4.1, 4.0, 4.4, 4.6, 4.0, 3.3, 4.2, 4.2, 4.2, 4.3, 3.0, 4.1, 6.0, 5.1, 5.9, 5.6, 5.8, 6.6, 4.5, 6.3, 5.8, 6.1, 5.1, 5.3, 5.5, 5.0, 5.1, 5.3, 5.5, 6.7, 6.9, 5.0, 
5.7, 4.9, 6.7, 4.9, 5.7, 6.0, 4.8, 4.9, 5.6, 5.8, 6.1, 6.4, 5.6, 5.1, 5.6, 6.1, 5.6, 5.5, 4.8, 5.4, 5.6, 5.1, 5.1, 5.9, 5.7, 5.2, 5.0, 5.2, 5.4, 5.1]\npetal2 = [0.2, 0.2, 0.2, 0.2, 0.2, 0.4, 0.3, 0.2, 0.2, 0.1, 0.2, 0.2, 0.1, 0.1, 0.2, 0.4, 0.4, 0.3, 0.3, 0.3, 0.2, 0.4, 0.2, 0.5, 0.2, 0.2, 0.4, 0.2, 0.2, 0.2, 0.2, 0.4, 0.1, 0.2, 0.1, 0.2, 0.2, 0.1, 0.2, 0.2, 0.3, 0.3, 0.2, 0.6, 0.4, 0.3, 0.2, 0.2, 0.2, 0.2, 1.4, 1.5, 1.5, 1.3, 1.5, 1.3, 1.6, 1.0, 1.3, 1.4, 1.0, 1.5, 1.0, 1.4, 1.3, 1.4, 1.5, 1.0, 1.5, 1.1, 1.8, 1.3, 1.5, 1.2, 1.3, 1.4, 1.4, 1.7, 1.5, 1.0, 1.1, 1.0, 1.2, 1.6, 1.5, 1.6, 1.5, 1.3, 1.3, 1.3, 1.2, 1.4, 1.2, 1.0, 1.3, 1.2, 1.3, 1.3, 1.1, 1.3, 2.5, 1.9, 2.1, 1.8, 2.2, 2.1, 1.7, 1.8, 1.8, 2.5, 2.0, 1.9, 2.1, 2.0, 2.4, 2.3, 1.8, 2.2, 2.3, 1.5, 2.3, 2.0, 2.0, 1.8, 2.1, 1.8, 1.8, 1.8, 2.1, 1.6, 1.9, 2.0, 2.2, 1.5, 1.4, 2.3, 2.4, 1.8, 1.8, 2.1, 2.4, 2.3, 1.9, 2.3, 2.5, 2.3, 1.9, 2.0, 2.3, 1.8]\nspecies = ['setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica']\n\ndf = list(zip(sepal1, sepal2, petal, petal2, species))\ndf = pd.DataFrame(df)\ndf.columns = ['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)', 'species']\n\n# Create bee swarm plot with Seaborn's default settings\n_ = sns.swarmplot(x='species', y='petal length (cm)', data=df)\n\n# Label the axes\n_ = plt.xlabel('species')\n_ = plt.ylabel('petal length (cm)')\n\n# Show the plot\nplt.show()\n", "_____no_output_____" ], [ "def ecdf(data):\n \"\"\"Compute ECDF for a one-dimensional array of measurements.\"\"\"\n # Number of data points: n\n n = len(data)\n\n # x-data for the ECDF: x\n x = np.sort(data)\n\n # y-data for the ECDF: y\n y = np.arange(1, n+1) / n\n\n return x, y\n", "_____no_output_____" ], [ "# Compute ECDF for versicolor data: 
x_vers, y_vers\nx_vers, y_vers = ecdf(versicolor_petal_length)\n\n# Generate plot\n_ = plt.plot(x_vers, y_vers, marker='.', linestyle='none')\n\n# Label the axes\n_ = plt.xlabel('petal length (cm)')\n_ = plt.ylabel('ECDF')\n\n# Display the plot\nplt.show()\n\n# Plotting the ECDF\n# You will now use your ecdf() function to compute the ECDF for the \n# petal lengths of Anderson's Iris versicolor flowers. You will then plot the ECDF.\n# Recall that your ecdf() function returns two arrays \n# so you will need to unpack them. An example of such unpacking is x, y = foo(data), for some function foo().", "_____no_output_____" ], [ "setosa_petal_length = [1.4, 1.4, 1.3, 1.5, 1.4, 1.7, 1.4, 1.5, 1.4, 1.5, 1.5, 1.6, 1.4,\n 1.1, 1.2, 1.5, 1.3, 1.4, 1.7, 1.5, 1.7, 1.5, 1. , 1.7, 1.9, 1.6,\n 1.6, 1.5, 1.4, 1.6, 1.6, 1.5, 1.5, 1.4, 1.5, 1.2, 1.3, 1.5, 1.3,\n 1.5, 1.3, 1.3, 1.3, 1.6, 1.9, 1.4, 1.6, 1.4, 1.5, 1.4]\nversicolor_petal_length = [4.7, 4.5, 4.9, 4.0, 4.6, 4.5, 4.7, 3.3, 4.6, 3.9, 3.5, 4.2, 4.0, 4.7, 3.6, 4.4, 4.5, 4.1, 4.5, 3.9, 4.8, 4.0, 4.9, 4.7, 4.3, 4.4, 4.8, 5.0, 4.5, 3.5, 3.8, 3.7, 3.9, 5.1, 4.5, 4.5, 4.7, 4.4, 4.1, 4.0, 4.4, 4.6, 4.0, 3.3, 4.2, 4.2, 4.2, 4.3, 3.0, 4.1]\nvirginica_petal_length = [6. , 5.1, 5.9, 5.6, 5.8, 6.6, 4.5, 6.3, 5.8, 6.1, 5.1, 5.3, 5.5,\n 5. , 5.1, 5.3, 5.5, 6.7, 6.9, 5. , 5.7, 4.9, 6.7, 4.9, 5.7, 6. ,\n 4.8, 4.9, 5.6, 5.8, 6.1, 6.4, 5.6, 5.1, 5.6, 6.1, 5.6, 5.5, 4.8,\n 5.4, 5.6, 5.1, 5.1, 5.9, 5.7, 5.2, 5. , 5.2, 5.4, 5.1]\nsetosa_petal_length = np.array(setosa_petal_length)\n# do this for the other 2 ..............................\n\n# Compute ECDFs\nx_set, y_set = ecdf(setosa_petal_length)\nx_vers, y_vers = ecdf(versicolor_petal_length)\nx_virg, y_virg = ecdf(virginica_petal_length)\n\n# Plot all ECDFs on the same plot\n_ = plt.plot(x_set, y_set, marker='.', linestyle='none')\n_ = plt.plot(x_vers, y_vers, marker='.', linestyle='none')\n_ = plt.plot(x_virg, y_virg, marker='.', linestyle='none')\n\n# Annotate the plot\n_ = plt.legend(('setosa', 'versicolor', 'virginica'), loc='lower right')\n_ = plt.xlabel('petal length (cm)')\n_ = plt.ylabel('ECDF')\n\n# Display the plot\nplt.show()\n\n# ECDFs also allow you to compare two or more distributions \n# (though plots get cluttered if you have too many). Here, you will plot ECDFs \n# for the petal lengths of all three iris species. \n# You already wrote a function to generate ECDFs so you can put it to good use!", "_____no_output_____" ], [ "# Compute the mean\nmean_length_vers = np.mean(versicolor_petal_length)\n\n# Print the results with some nice formatting\nprint('I. versicolor:', mean_length_vers, 'cm')\n\n# The mean of all measurements gives an indication of \n# the typical magnitude of a measurement. It is computed using np.mean().", "I. versicolor: 4.26 cm\n" ], [ "# Specify array of percentiles: percentiles\npercentiles = np.array([2.5, 25, 50, 75, 97.5])\n\n# Compute percentiles: ptiles_vers\nptiles_vers = np.percentile(versicolor_petal_length, percentiles)\n\n# Print the result\nprint(ptiles_vers)\n\n# In this exercise, you will compute the percentiles of petal length of Iris versicolor.", "[3.3 4. 
4.35 4.6 4.9775]\n" ], [ "# Plot the ECDF\n_ = plt.plot(x_vers, y_vers, '.')\n_ = plt.xlabel('petal length (cm)')\n_ = plt.ylabel('ECDF')\n\n# Overlay percentiles as red x's\n_ = plt.plot(ptiles_vers, percentiles/100, marker='D', color='red',\n linestyle='none')\n\n# Show the plot\nplt.show()\n\n# To see how the percentiles relate to the ECDF, you will plot the percentiles of \n# Iris versicolor petal lengths you calculated in the last exercise on the ECDF plot you generated in chapter 1. \n# The percentile variables from the previous exercise are available in the workspace as ptiles_vers and percentiles.", "_____no_output_____" ], [ "# Create box plot with Seaborn's default settings\n_ = sns.boxplot(x='species', y='petal length (cm)', data=df)\n\n# Label the axes\n_ = plt.xlabel('species')\n_ = plt.ylabel('petal length (cm)')\n\n# Show the plot\nplt.show()\n\n\n# Making a box plot for the petal lengths is unnecessary because the iris data set is \n# not too large and the bee swarm plot works fine. However, it is always good to get some practice. ", "_____no_output_____" ], [ "# Standard deviation is a reasonable metric for the typical spread of the data \n\n# Array of differences to mean: differences\ndifferences = versicolor_petal_length - np.mean(versicolor_petal_length)\n\n# Square the differences: diff_sq\ndiff_sq = differences**2\n\n# Compute the mean square difference: variance_explicit\nvariance_explicit = np.mean(diff_sq)\n\n# Compute the variance using NumPy: variance_np\nvariance_np = np.var(versicolor_petal_length)\n\n# Print the results\nprint(variance_explicit, variance_np)\n", "0.21640000000000004 0.21640000000000004\n" ], [ "# Compute the variance: variance\nvariance = np.var(versicolor_petal_length)\n\n# Print the square root of the variance\nprint(np.sqrt(variance))\n\n# Print the standard deviation\nprint(np.std(versicolor_petal_length))\n\n# the standard deviation is the square root of the variance\n# the variance is how far a set of random data points are spread out from the mean\n# The variance measures how far each number in the set is from the mean", "0.4651881339845203\n0.4651881339845203\n" ], [ "# A low standard deviation means that most of the numbers are very close to the average. \n# A high standard deviation means that the numbers are spread out.\n# Covariance of a point is the mean of the product of those differences, with respect to the mean of the x and mean \n# of the y axis\n# If covariance is positive, the point is positively correlated (if it's above the x mean and y mean) \n# If x is high and y is low or vice versa, then the point is negatively correlated\n\n# covariance / (std of x * std of y) = Pearson correlation rho", "_____no_output_____" ], [ "versicolor_petal_width = np.array([1.4, 1.5, 1.5, 1.3, 1.5, 1.3, 1.6, 1. , 1.3, 1.4, 1. , 1.5, 1. ,\n 1.4, 1.3, 1.4, 1.5, 1. , 1.5, 1.1, 1.8, 1.3, 1.5, 1.2, 1.3, 1.4,\n 1.4, 1.7, 1.5, 1. , 1.1, 1. , 1.2, 1.6, 1.5, 1.6, 1.5, 1.3, 1.3,\n 1.3, 1.2, 1.4, 1.2, 1. , 1.3, 1.2, 1.3, 1.3, 1.1, 1.3])\n\n# Make a scatter plot\n_ = plt.plot(versicolor_petal_length, versicolor_petal_width,\n marker='.', linestyle='none')\n\n# Label the axes\n_ = plt.xlabel('petal length (cm)')\n_ = plt.ylabel('petal width (cm)')\n\n# Show the result\nplt.show()\n\n\n# When you made bee swarm plots, box plots, and ECDF plots in previous exercises, you compared \n# the petal lengths of different species of iris. But what if you want to compare \n# two properties of a single species? This is exactly what we will do in this exercise. 
We will make a scatter\n# plot of the petal length and width measurements of Anderson's Iris versicolor flowers. If the flower scales \n# (that is, it preserves its proportion as it grows), we would expect the length and width to be correlated.", "_____no_output_____" ], [ "# the highest variance in the variable x,\n# the highest covariance,\n# negative covariance?\n\n# Compute the covariance matrix: covariance_matrix\ncovariance_matrix = np.cov(versicolor_petal_length, versicolor_petal_width)\n\n# Print covariance matrix\nprint(covariance_matrix)\n\n# Extract covariance of length and width of petals: petal_cov\npetal_cov = covariance_matrix[0,1]\n\n# Print the length/width covariance\nprint(petal_cov)\n\n# The covariance may be computed using the Numpy function np.cov(). For example, we have two sets of \n# data x and y, np.cov(x, y) returns a 2D array where entries [0,1] and \n# [1,0] are the covariances. Entry [0,0] is the variance of the data in x, and entry [1,1] is the variance of \n# the data in y. This 2D output array is called the covariance matrix, since it organizes the self- and covariance.", "[[0.22081633 0.07310204]\n [0.07310204 0.03910612]]\n0.07310204081632653\n" ], [ "def pearson_r(x, y):\n \"\"\"Compute Pearson correlation coefficient between two arrays.\"\"\"\n # Compute correlation matrix: corr_mat\n corr_mat = np.corrcoef(x, y)\n\n # Return entry [0,1]\n return corr_mat[0,1]\n\n# Compute Pearson correlation coefficient for I. versicolor\nr = pearson_r(versicolor_petal_width, versicolor_petal_length)\n\n# Print the result\nprint(r)\n\n\n# Computing the Pearson correlation coefficient\n# As mentioned in the video, the Pearson correlation coefficient, also called the Pearson r, is \n# often easier to interpret than the covariance. It is computed using the np.corrcoef() function. Like np.cov(), \n# it takes two arrays as arguments and returns a 2D array. Entries [0,0] and [1,1] are necessarily equal to 1\n# (can you think about why?), and the value we are after is entry [0,1].\n\n# In this exercise, you will write a function, pearson_r(x, y) that takes in two arrays \n# and returns the Pearson correlation coefficient. You will then use this function to compute it for the\n# petal lengths and widths of I. versicolor.", "0.7866680885228169\n" ], [ "# Why do we do statistical inference?\n\n# To draw probabilistic conclusions about what we might expect if we collected the same data again.\n# To draw actionable conclusions from data.\n# To draw more general conclusions from relatively few data or observations.\n# Summary: Correct! 
Statistical inference involves taking your data to probabilistic \n# conclusions about what you would expect if you\n# took even more data, and you can make decisions based on these conclusions.\n\n# Seed the random number generator\nnp.random.seed(42)\n\n# Initialize random numbers: random_numbers\nrandom_numbers = np.empty(100000)\n\n# Generate random numbers by looping over range(100000)\nfor i in range(100000):\n random_numbers[i] = np.random.random()\n\n# Plot a histogram\n_ = plt.hist(random_numbers, ec='white')\n\n# Show the plot\nplt.show()\n", "_____no_output_____" ], [ "def perform_bernoulli_trials(n, p):\n \"\"\"Perform n Bernoulli trials with success probability p\n and return number of successes.\"\"\"\n # Initialize number of successes: n_success\n n_success = 0\n\n # Perform trials\n for i in range(n):\n # Choose random number between zero and one: random_number\n random_number = np.random.random()\n\n # If less than p, it's a success so add one to n_success\n if random_number < p:\n n_success += 1\n\n return n_success\n # The np.random module and Bernoulli trials\n# You can think of a Bernoulli trial as a flip of a possibly biased coin. Specifically, each coin flip \n# has a probability p of landing heads (success) and probability 1−p of landing tails (failure).\n# In this exercise, you will write a function to perform n Bernoulli trials, perform_bernoulli_trials(n, p),\n# which returns the number of successes out of n Bernoulli trials, each of which has probability p of success. \n# To perform each Bernoulli trial, \n# use the np.random.random() function, which returns a random number between zero and one.", "_____no_output_____" ], [ "# Seed random number generator\nnp.random.seed(42)\n\n# Initialize the number of defaults: n_defaults\nn_defaults = np.empty(1000)\n\n# Compute the number of defaults\nfor i in range(1000):\n n_defaults[i] = perform_bernoulli_trials(100, 0.05)\n\n# Plot the histogram with default number of bins; label your axes\n_ = plt.hist(n_defaults, normed=True)\n_ = plt.xlabel('number of defaults out of 100 loans')\n_ = plt.ylabel('probability')\n\n# Show the plot\nplt.show()\n\n# How many defaults might we expect?\n# Let's say a bank made 100 mortgage loans. It is possible that anywhere between 0 and 100 of the loans will be defaulted upon. \n# You would like to know the probability of getting a given number of defaults, given that the probability of a \n# default is p = 0.05. To investigate this, you will do a simulation. You will perform 100 Bernoulli trials using \n# the perform_bernoulli_trials() function you wrote in the previous exercise and record how many defaults we get. Here, a success \n# is a default. (Remember that the word \"success\" just means that the Bernoulli trial evaluates to True, i.e., \n# did the loan recipient default?) You will do this for another 100 Bernoulli trials. And again and again until we \n# have tried it 1000 times. 
Then, you will plot a histogram describing the probability of the number of defaults.", "_____no_output_____" ], [ "# Compute ECDF: x, y\nx, y = ecdf(n_defaults)\n\n# Plot the CDF with labeled axes\n_ = plt.plot(x, y, marker='.', linestyle='none')\n_ = plt.xlabel('number of defaults out of 100')\n_ = plt.ylabel('CDF')\n\n# Show the plot\nplt.show()\n\n# Compute the number of 100-loan simulations with 10 or more defaults: n_lose_money\nn_lose_money = np.sum(n_defaults >= 10)\n\n# Compute and print probability of losing money\nprint('Probability of losing money =', n_lose_money / len(n_defaults))\n", "_____no_output_____" ], [ "# Take 10,000 samples out of the binomial distribution: n_defaults\nn_defaults = np.random.binomial(n=100, p=0.05, size=10000)\n\n# Compute CDF: x, y\nx, y = ecdf(n_defaults)\n\n# Plot the CDF with axis labels\n_ = plt.plot(x, y, marker='.', linestyle='none')\n_ = plt.xlabel('number of defaults out of 100 loans')\n_ = plt.ylabel('CDF')\n\n# Show the plot\nplt.show()\n\n\n# Sampling out of the Binomial distribution\n# Compute the probability mass function for the number of defaults we would expect for 100 loans as in the last\n# section, but instead of simulating all of the Bernoulli trials, perform the sampling using np.random.binomial().\n# This is identical to the calculation you did in the last set of exercises using your custom-written \n# perform_bernoulli_trials() function, but far more computationally efficient. Given this extra efficiency, we will\n# take 10,000 samples instead of 1000. After \n# taking the samples, plot the CDF as last time. This CDF that you are plotting is that of the Binomial distribution.", "_____no_output_____" ], [ "# Compute bin edges: bins\nbins = np.arange(0, max(n_defaults) + 1.5) - 0.5\n\n# Generate histogram\n_ = plt.hist(n_defaults, normed=True, bins=bins)\n\n# Label axes\n_ = plt.xlabel('number of defaults out of 100 loans')\n_ = plt.ylabel('PMF')\n\n# Show the plot\nplt.show()\n\n# Plotting the Binomial PMF\n# As mentioned in the video, plotting a nice looking PMF requires a bit of matplotlib trickery that we will not \n# go into here. Instead, we will plot the PMF of the Binomial distribution as a histogram with skills you have already\n# learned. The trick is setting up the edges of the bins to pass to plt.hist() via the bins keyword argument. \n# We want the bins centered on the integers. So, the edges of the bins should be -0.5, 0.5, 1.5, 2.5, ... up to \n# max(n_defaults) + 1.5. You can generate an array like this using np.arange() and then subtracting 0.5 from the array.", "_____no_output_____" ], [ "# Draw 10,000 samples out of Poisson distribution: samples_poisson\nsamples_poisson = np.random.poisson(10, size=10000)\n\n# Print the mean and standard deviation\nprint('Poisson: ', np.mean(samples_poisson),\n np.std(samples_poisson))\n\n# Specify values of n and p to consider for Binomial: n, p\nn = [20, 100, 1000]\np = [0.5, 0.1, 0.01]\n\n# Draw 10,000 samples for each n,p pair: samples_binomial\nfor i in range(3):\n samples_binomial = np.random.binomial(n[i], p[i], size=10000)\n\n # Print results\n print('n =', n[i], 'Binom:', np.mean(samples_binomial),\n np.std(samples_binomial))\n\n# Relationship between Binomial and Poisson distributions\n# You just heard that the Poisson distribution is a limit of the Binomial distribution for rare events. \n# This makes sense if you think about the stories. Say we do a Bernoulli trial every minute for an hour, \n# each with a success probability of 0.1. 
We would do 60 trials, and the number of successes is Binomially distributed,\n# and we would expect to get about 6 successes. This is just like the Poisson story we discussed in the video,\n# where we get on average 6 hits on a website per hour. So, the Poisson distribution with arrival rate equal \n# to np approximates a Binomial distribution for n Bernoulli trials with probability p of success \n# (with n large and p small). Importantly, the Poisson distribution is often simpler to work \n# with because it has only one parameter instead of two for the Binomial distribution.\n\n", "Poisson: 10.0111 3.1747089299650764\nn = 20 Binom: 9.9606 2.2378220751435984\nn = 100 Binom: 10.0546 3.0155296118592503\nn = 1000 Binom: 10.0444 3.155982991082176\n" ], [ "# Possible Answers\n# Discrete uniform\n# Binomial\n# Poisson\n# Both Binomial and Poisson, though Poisson is easier to model and compute.\n# Both Binomial and Poisson, though Binomial is easier to model and compute.\n\n", "_____no_output_____" ], [ "# Correct! When we have rare events (low p, high n), the Binomial distribution is Poisson. \n# This has a single parameter, \n# the mean number of successes per time interval, in our case the mean number of no-hitters per season.", "_____no_output_____" ], [ "# Draw 10,000 samples out of Poisson distribution: n_nohitters\nn_nohitters = np.random.poisson(251/115, size=10000)\n\n# Compute number of samples that are seven or greater: n_large\nn_large = np.sum(n_nohitters >= 7)\n\n# Compute probability of getting seven or more: p_large\np_large = n_large / 10000\n\n# Print the result\nprint('Probability of seven or more no-hitters:', p_large)\n\n# 1990 and 2015 featured the most no-hitters of any season of baseball (there were seven). Given that \n# there are on average 251/115 no-hitters per season, what is the probability of having seven or more in a season?", "Probability of seven or more no-hitters: 0.0071\n" ], [ "# a discrete quantity is like a dice roll\n# a continuous quantity is like light\n\n# The value of the CDF at \n# x = 10 is 0.75, so the probability that x < 10 is 0.75. Thus, the probability that x > 10 is 0.25.", "_____no_output_____" ], [ "# Draw 100000 samples from Normal distribution with stds of interest: samples_std1, samples_std3, samples_std10\nsamples_std1 = np.random.normal(20, 1, size=100000)\nsamples_std3 = np.random.normal(20, 3, size=100000)\nsamples_std10 = np.random.normal(20, 10, size=100000)\n\n# Make histograms\n_ = plt.hist(samples_std1, bins=100, normed=True, histtype='step')\n_ = plt.hist(samples_std3, bins=100, normed=True, histtype='step')\n_ = plt.hist(samples_std10, bins=100, normed=True, histtype='step')\n\n# Make a legend, set limits and show plot\n_ = plt.legend(('std = 1', 'std = 3', 'std = 10'))\nplt.ylim(-0.01, 0.42)\nplt.show()\n\n# In this exercise, you will explore the Normal PDF and also learn a way to plot a PDF of a known distribution \n# using hacker statistics. Specifically, you will plot a Normal PDF for various values of the variance.\n\n# You can see how the different standard deviations result \n# in PDFS of different widths. 
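# Another way to see what the standard deviation controls (a quick sketch using the samples above):\n# about 68% of Normal samples fall within one standard deviation of the mean.\nprint(np.mean(np.abs(samples_std3 - 20) <= 3))   # ~0.68\n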
The peaks are all centered at the mean of 20.", "_____no_output_____" ], [ "# Generate CDFs\nx_std1, y_std1 = ecdf(samples_std1)\nx_std3, y_std3 = ecdf(samples_std3)\nx_std10, y_std10 = ecdf(samples_std10)\n\n# Plot CDFs\n_ = plt.plot(x_std1, y_std1, marker='.', linestyle='none')\n_ = plt.plot(x_std3, y_std3, marker='.', linestyle='none')\n_ = plt.plot(x_std10, y_std10, marker='.', linestyle='none')\n\n# Make a legend and show the plot\n_ = plt.legend(('std = 1', 'std = 3', 'std = 10'), loc='lower right')\nplt.show()\n\n# Now that you have a feel for how the Normal PDF looks, let's consider\n# its CDF. Using the samples you generated in the last exercise\n# (in your namespace as samples_std1, samples_std3, and samples_std10), generate and plot the CDFs.\n\n# The CDFs all pass through the mean at the 50th percentile; the \n# mean and median of a Normal distribution are equal. The width of the CDF varies with the standard deviation.", "_____no_output_____" ], [ "belmont = [148.51, 146.65, 148.52, 150.7, 150.42000000000002, 150.88, 151.57, 147.54, 149.65, 148.74, 147.86, 148.75, 147.5, 148.26, 149.71, 146.56, 151.19, 147.88, 149.16, 148.82, 148.96, 152.02, 146.82, 149.97, 146.13, 148.1, 147.2, 146.0, 146.4, 148.2, 149.8, 147.0, 147.2, 147.8, 148.2, 149.0, 149.8, 148.6, 146.8, 149.6, 149.0, 148.2, 149.2, 148.0, 150.4, 148.8, 147.2, 148.8, 149.6, 148.4, 148.4, 150.2, 148.8, 149.2, 149.2, 148.4, 150.2, 146.6, 149.8, 149.0, 150.8, 148.6, 150.2, 149.0, 148.6, 150.2, 148.2, 149.4, 150.8, 150.2, 152.2, 148.2, 149.2, 151.0, 149.6, 149.6, 149.4, 148.6, 150.0, 150.6, 149.2, 152.6, 152.8, 149.6, 151.6, 152.8, 153.2, 152.4, 152.2]\nbelmont_no_outliers = np.array(belmont)\n\n# Compute mean and standard deviation: mu, sigma\nmu = np.mean(belmont_no_outliers)\nsigma = np.std(belmont_no_outliers)\n\n# Sample out of a normal distribution with this mu and sigma: samples\nsamples = np.random.normal(mu, sigma, size=10000)\n\n# Get the CDF of the samples and of the data\nx_theor, y_theor = ecdf(samples)\nx, y = ecdf(belmont_no_outliers)\n\n# Plot the CDFs and show the plot\n_ = plt.plot(x_theor, y_theor)\n_ = plt.plot(x, y, marker='.', linestyle='none')\n_ = plt.xlabel('Belmont winning time (sec.)')\n_ = plt.ylabel('CDF')\nplt.show()\n\n# Since 1926, the Belmont Stakes is a 1.5 mile-long race of 3-year old thoroughbred horses. \n# Secretariat ran the fastest Belmont Stakes in history in 1973. While that was the fastest year, 1970 was \n# the slowest because of unusually wet and sloppy conditions. With these two outliers removed from the data\n# set, compute the mean and standard deviation of the Belmont winners' times. Sample out of a Normal \n# distribution with this mean and standard deviation using the np.random.normal() function and plot a CDF.\n# Overlay the ECDF from the winning Belmont times. Are these close to Normally distributed?\n\n# Note: Justin scraped the data concerning the Belmont Stakes from the Belmont Wikipedia page.\n\n# The theoretical CDF and the ECDF of the data suggest that the winning Belmont times are, indeed, Normally \n# distributed. 
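# The same conclusion can be checked without sampling (a sketch, assuming scipy is available):\nfrom scipy import stats\nx_grid = np.linspace(np.min(belmont_no_outliers), np.max(belmont_no_outliers), 200)\n_ = plt.plot(x_grid, stats.norm.cdf(x_grid, mu, sigma))   # exact Normal CDF with the mu and sigma above\n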
This also suggests that in the last 100 years or so, there have not been major\n# technological or training advances that have significantly affected the speed at which horses can run this race.", "_____no_output_____" ], [ "# What are the chances of a horse matching or beating Secretariat's record?\n# Assume that the Belmont winners' times are Normally distributed (with the 1970 and 1973 years removed), what \n# is the probability that the winner of a given Belmont Stakes will run it as fast or faster than Secretariat?\n\n# Take a million samples out of the Normal distribution: samples\nsamples = np.random.normal(mu, sigma, size=1000000)\n\n# Compute the fraction that are faster than 144 seconds: prob\nprob = np.sum(samples <= 144) / len(samples)\n\n# Print the result\nprint('Probability of besting Secretariat:', prob)\n\n# Great work! We had to take a million samples because the probability of \n# a fast time is very low and we had to be sure to sample enough.\n# We get that there is only a 0.06% chance of a horse running the Belmont as fast as Secretariat.", "Probability of besting Secretariat: 0.000633\n" ], [ "# Matching a story and a distribution\n# How might we expect the time between Major League no-hitters to be distributed? \n# Be careful here: a few exercises ago, we considered the probability distribution \n# for the number of no-hitters in a season. \n# Now, we are looking at the probability distribution of the time between no hitters.\n\n# Possible Answers\n# Normal\n# Exponential\n# Poisson\n# Uniform", "_____no_output_____" ], [ "# Waiting for the next Secretariat\n# Unfortunately, Justin was not alive when Secretariat ran the Belmont in 1973. \n# Do you think he will get to see a performance like that? \n# To answer this, you are interested in how many years you would expect to wait until you see another \n# performance like Secretariat's. How is the waiting time\n# until the next performance as good or better than Secretariat's distributed? Choose the best answer.\n\n# Possible Answers\n# Normal, because the distribution of Belmont winning times are Normally distributed.\n# Normal, because there is a most-expected waiting time, so there should be a single peak to the distribution.\n# Exponential: It is very unlikely for a horse to be faster than Secretariat, so the distribution should decay \n# away to zero for high waiting time.\n# Exponential: A horse as fast as Secretariat is a rare event, which can be modeled as a Poisson process,\n# and the waiting time between arrivals of a Poisson process is Exponentially distributed.\n\n# Correct! The Exponential distribution describes the waiting times between rare events, and Secretariat is rare!", "_____no_output_____" ], [ "def successive_poisson(tau1, tau2, size=1):\n \"\"\"Compute time for arrival of 2 successive Poisson processes.\"\"\"\n # Draw samples out of first exponential distribution: t1\n t1 = np.random.exponential(tau1, size=size)\n\n # Draw samples out of second exponential distribution: t2\n t2 = np.random.exponential(tau2, size=size)\n\n return t1 + t2\n", "_____no_output_____" ], [ "# If you have a story, you can simulate it!\n# Sometimes, the story describing our probability distribution does not \n# have a named distribution to go along with it. In these cases, fear not! \n# You can always simulate it. We'll do that in this and the next exercise.\n\n# In earlier exercises, we looked at the rare event of no-hitters in Major \n# League Baseball. Hitting the cycle is another rare baseball event. 
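# Cross-check of the Secretariat estimate above (a sketch, assuming scipy is available):\nfrom scipy import stats\nprint(stats.norm.cdf(144, mu, sigma))   # ~0.0006, matching the million-sample simulation\n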
When a\n# batter hits the cycle, he gets all four kinds of hits, a single, double, \n# triple, and home run, in a single game. Like no-hitters, this can be modeled\n# as a Poisson process, so the time between hits of the cycle are also Exponentially distributed.\n\n# How long must we wait to see both a no-hitter and then a batter hit \n# the cycle? The idea is that we have to wait some time for the no-hitter,\n# and then after the no-hitter, we have to wait for hitting the cycle. Stated \n# another way, what is the total waiting time for the arrival of two different\n# Poisson processes? The total waiting time is the time waited for the no-hitter, \n# plus the time waited for the hitting the cycle.\n\n# Now, you will write a function to sample out of the distribution described by this story.", "_____no_output_____" ], [ "# Distribution of no-hitters and cycles\n# Now, you'll use your sampling function to compute the waiting time to observe a no-hitter and hitting of the cycle. \n# The mean waiting time for a no-hitter is 764 games, and the mean waiting time for hitting the cycle is 715 games.\n\n# Draw samples of waiting times\nwaiting_times = successive_poisson(764, 715, size=100000)\n\n# Make the histogram\n_ = plt.hist(waiting_times, bins=100, histtype='step',\n normed=True)\n\n# Label axes\n_ = plt.xlabel('total waiting time (games)')\n_ = plt.ylabel('PDF')\n\n# Show the plot\nplt.show()\n", "_____no_output_____" ] ] ]
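A quick sanity check on the simulation above (a sketch, not part of the original exercises): the mean of the summed Exponential waiting times should land near tau1 + tau2.

print(np.mean(waiting_times))   # expected to be close to 764 + 715 = 1479 games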
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a7bb250701b16c3f9e814f808078911db51eb34
39,248
ipynb
Jupyter Notebook
old_notebooks/spike_analyses_models.ipynb
KirstieJane/NeuropixelAnalyses
d54f5dfeb5283c6639fb125ee3478781b9dfffba
[ "MIT" ]
1
2021-12-13T05:10:17.000Z
2021-12-13T05:10:17.000Z
old_notebooks/spike_analyses_models.ipynb
KirstieJane/NeuropixelAnalyses
d54f5dfeb5283c6639fb125ee3478781b9dfffba
[ "MIT" ]
null
null
null
old_notebooks/spike_analyses_models.ipynb
KirstieJane/NeuropixelAnalyses
d54f5dfeb5283c6639fb125ee3478781b9dfffba
[ "MIT" ]
null
null
null
32.463193
472
0.494063
[ [ [ "# Introduction\n## Research Question \n\nWhat is the information flow from visual stream to motor processing and how early in processing can we predict behavioural outcomes.\n- Can decoding models be trained by region \n- How accurate are the modeled regions at predicting a behaviour \n- Possible behaviours (correct vs. incorrect) \n- Movement of wheel", "_____no_output_____" ], [ "## Brief background\nThe Steinmetz (2018) dataset reported that neurons with action correlates are found globally and that neurons in nearly every brain region are non-selectively activated in the moments leading up to movement onset, however it is currently not known how the information integration occurs across the motor areas and how that integration gives rise to motor behaviour.\n\nNeuron population coding has been robustly used to decode motor behaviours across various species (Georgopoulos et al., 1986), and recent literature has suggested that motor preparation and planning uses distributed populations in corticomotor areas to plan motor movements. However this previous work has been limited by the number of electrodes and therefore areas measured in a single task.\n\nThe following assignment seeks to take advantage of the multi-array recording from the Steinmetz (2018) neuropixel data set to investigate temporal aspects of motor behaviours.", "_____no_output_____" ], [ "# Data Analyses\n:brain: :mouse: :brain:\n\n## Set Up", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport dataframe_image as dfi\nimport pathlib\nfrom matplotlib import rcParams \nfrom matplotlib import pyplot as plt\nimport emoji\n\nrcParams['figure.figsize'] = [15, 5]\nrcParams['font.size'] = 15\nrcParams['axes.spines.top'] = False\nrcParams['axes.spines.right'] = False\nrcParams['figure.autolayout'] = True", "_____no_output_____" ], [ "import os, requests\n\nfname = []\nfor i in range(3):\n fname.append('steinmetz_part%d.npz'%i)\nurl = ['https://osf.io/agvxh/download']\nurl.append('https://osf.io/uv3mw/download')\nurl.append('https://osf.io/ehmw2/download')\n\nfor i in range(len(url)):\n if not os.path.isfile(fname[i]):\n try:\n r = requests.get(url[i])\n except requests.ConnectionError:\n print(\"Data could not download!\")\n else:\n if r.status_code != requests.codes.ok:\n print(\"Data could not download!\")\n else:\n with open(fname[i], \"wb\") as fid:\n fid.write(r.content)\n \n\nsteinmetz_data = np.array([])\nfor i in range(len(fname)):\n steinmetz_data = np.hstack((steinmetz_data, np.load('steinmetz_part%d.npz'%i, allow_pickle=True)['dat']))", "_____no_output_____" ] ], [ [ "## Exploring the data", "_____no_output_____" ] ], [ [ "# choose one recording session (20) to get labels\nsession_20 = steinmetz_data[20]\nkeys = session_20.keys()\nprint(keys)", "_____no_output_____" ], [ "for key in session_20.keys():\n dataset_info = session_20[key]\n if isinstance (dataset_info, np.ndarray):\n print(key, dataset_info.shape, \" - array\")\n elif isinstance (dataset_info, list):\n print(key, len(dataset_info), \" - list\")\n else: \n print(key, type(dataset_info), \" - other\")", "_____no_output_____" ], [ "brain_areas = []\nfor i in range(steinmetz_data.shape[0]):\n unique_area = np.unique(steinmetz_data[i]['brain_area']) # check this line for the \n for u in unique_area:\n brain_areas.append(u)\nubs = list(np.unique(brain_areas))", "_____no_output_____" ], [ "table = pd.DataFrame(columns=['session', 'mouse_name', 'n_neuron'] + ubs)\n\nfor i in range(steinmetz_data.shape[0]):\n this_session: dict = 
{}\n unique_barea = list(np.unique(steinmetz_data[i]['brain_area']))\n this_session['session'] = i\n this_session['mouse_name'] = steinmetz_data[i]['mouse_name']\n this_session['n_neuron'] = steinmetz_data[i]['spks'].shape[0]\n this_session['n_trial'] = steinmetz_data[i]['spks'].shape[1]\n for ubrea in unique_barea:\n n_neuron, n_trial, _ = (steinmetz_data[i]['spks'][steinmetz_data[i]['brain_area'] == ubrea]).shape\n this_session[ubrea] = n_neuron\n\n table = table.append(this_session, ignore_index=True)\ntable = table.fillna(0)\npathlib.Path('/Users/sophiabatchelor/Code/SteinmetzAnalyses/Images').mkdir(parents=True, exist_ok=True)\ndfi.export(table, '/Users/sophiabatchelor/Code/SteinmetzAnalyses/Images/steinmetz_all_data_table.png', max_cols=77)", "_____no_output_____" ], [ "table", "_____no_output_____" ] ], [ [ "## Investigate Spiking Reponses", "_____no_output_____" ] ], [ [ "# groupings of brain regions\nbrain_regions = [\"vis ctx\", \"thal\", \"hipp\", \"other ctx\", \"midbrain\", \"basal ganglia\", \"cortical subplate\", \"other\"]\nbrain_groupings = [[\"VISa\", \"VISam\", \"VISl\", \"VISp\", \"VISpm\", \"VISrl\"], # visual cortex\n [\"CL\", \"LD\", \"LGd\", \"LH\", \"LP\", \"MD\", \"MG\", \"PO\", \"POL\", \"PT\", \"RT\", \"SPF\", \"TH\", \"VAL\", \"VPL\", \"VPM\"], # thalamus\n [\"CA\", \"CA1\", \"CA2\", \"CA3\", \"DG\", \"SUB\", \"POST\"], # hippocampal\n [\"ACA\", \"AUD\", \"COA\", \"DP\", \"ILA\", \"MOp\", \"MOs\", \"OLF\", \"ORB\", \"ORBm\", \"PIR\", \"PL\", \"SSp\", \"SSs\", \"RSP\",\" TT\"], # non-visual cortex\n [\"APN\", \"IC\", \"MB\", \"MRN\", \"NB\", \"PAG\", \"RN\", \"SCs\", \"SCm\", \"SCig\", \"SCsg\", \"ZI\"], # midbrain\n [\"ACB\", \"CP\", \"GPe\", \"LS\", \"LSc\", \"LSr\", \"MS\", \"OT\", \"SNr\", \"SI\"], # basal ganglia \n [\"BLA\", \"BMA\", \"EP\", \"EPd\", \"MEA\"] # cortical subplate\n ]", "_____no_output_____" ], [ "mouse_dict = {} # create a dictionary \n\nfor session, dat_i in enumerate(steinmetz_data):\n name = dat_i[\"mouse_name\"]\n if name not in mouse_dict.keys():\n mouse_dict[name] = [dat_i]\n\n else:\n lst = mouse_dict[name]\n lst.append(dat_i)\n mouse_dict[name] = lst", "_____no_output_____" ], [ "assigned_region = \"VISp\"\n\n\n# analyse for all runs of a single mouse\nfor mouse in [\"Cori\"]:\n mouse_data = mouse_dict[mouse] #list of the sessions corresponding to this mouse, [alldat[0], alldat[1], alldat[2]]\n num_sessions = len(mouse_dict[mouse])\n\n thing = None\n for trial in mouse_data:\n spk_trial = trial['spks']\n if assigned_region in trial[\"brain_area\"]:\n spk_trial_region = spk_trial[trial[\"brain_area\"] == assigned_region]\n\n # average over trials\n spk_trial_region_avg = np.mean(spk_trial_region, axis=1)\n\n # take only values that are average above 0.2\n spk_trial_region_avg_good = spk_trial_region_avg[np.mean(spk_trial_region_avg, axis=1) >= 0.2,:]\n\n if thing is not None:\n thing = np.concatenate((thing, spk_trial_region_avg_good))\n else:\n thing = spk_trial_region_avg_good\n\n plot = plt.figure()\n plt.plot(thing.T) \n plot.suptitle(\"High Spiking Neurons in Cori's Primary Visual Cortex\")\n plt.xlabel(\"Timebins\")\n plt.ylabel(\"Average Number of Spikes\")\n plt.savefig('/Users/sophiabatchelor/Code/SteinmetzAnalyses/Images/Plots/cori_v1_spks.png')\n plt.show(plot)", "_____no_output_____" ], [ "# Group The data by mouse\nfor session, dat_i in enumerate(steinmetz_data):\n name = dat_i[\"mouse_name\"]\n if name not in mouse_dict.keys():\n mouse_dict[name] = [dat_i]\n\n else:\n lst = mouse_dict[name]\n lst.append(dat_i)\n 
mouse_dict[name] = lst", "_____no_output_____" ], [ "names = []\nfor dat_i in steinmetz_data:\n name = dat_i[\"mouse_name\"]\n if name not in names:\n names.append(name)\nprint(\"Mice: {}\".format(names))", "Mice: ['Cori', 'Forssmann', 'Hench', 'Lederberg', 'Moniz', 'Muller', 'Radnitz', 'Richards', 'Tatum', 'Theiler']\n" ], [ "assigned_regions = ['CA1', 'CA3',\"VISp\", \"VISpm\", \"VISrl\", \"VISam\", \"VISa\", \"DG\", \"MD\", \"MOs\", \"MG\", \"MOp\" ,]\n # change this to be whichever regions are of interest\n # !! NOTE !! the order matters\n\n### Note ### \n# LIST OF AREAS \n# \"VISp\", \"VISpm\", \"VISI\", \"VISrl\", \"VISam\", \"VISa\", 'CA1', 'CA3', \"DG\", \"CP\", \"SCm\", \"SCs\", \"SNr\", \"SSp\", \"ACA\", \"ILA\", \"GPe\", \"ACB\", \"APN\", \"BLA\", \"LD\", \"LGd\", \"LP\", \"LS\", \"MD\", \"MG\", \"MOp\", \"MOs\", \"MRN\", \"OLF\", \"ORB\", \"PAG\", \"PL\", \"PO\", \"POL\", \"POST\", \"RSP\", \"RT\", \"SUB\", \"ZI\", \"VPL\", \"VPM\"\n# VISI is throwing an error", "_____no_output_____" ], [ "for assigned_region in assigned_regions:\n all_mice_names = []\n all_mice_lines = None\n\n for mouse in mouse_dict.keys():\n mouse_data = mouse_dict[mouse] \n num_sessions = len(mouse_dict[mouse])\n\n spk_all_sessions = None\n \n for session in mouse_data:\n spk_session = session['spks'] \n if assigned_region in session['brain_area']:\n spk_session_region = spk_session[session['brain_area'] == assigned_region]\n\n # average over trials\n spk_session_region_avg = np.mean(spk_session_region, axis=1)\n\n if spk_all_sessions is not None:\n spk_all_sessions = np.concatenate((spk_all_sessions, spk_session_region_avg))\n else:\n spk_all_sessions = spk_session_region_avg\n \n\n # average over all neurons\n if spk_all_sessions is not None:\n name_i = mouse\n all_mice_names.append(name_i)\n mouse_i = np.mean(spk_all_sessions, axis=0)\n mouse_i = np.expand_dims(mouse_i, 0)\n\n if all_mice_lines is not None:\n all_mice_lines = np.concatenate((all_mice_lines, mouse_i), axis = 0)\n else:\n all_mice_lines = mouse_i \n\n plot = plt.figure(figsize=(10, 5))\n plt.plot(all_mice_lines.T) # had to transpose so that time was on the x axis \n\n plot.suptitle(\"Average Spiking of {}\".format(assigned_region))\n plt.xlabel(\"Timebins\") # change axis labels if you need reminders\n plt.ylabel(\"Average Number of Spikes per time bin\")\n plt.legend(all_mice_names, loc = \"upper right\")\n pathlib.Path('/Users/sophiabatchelor/Code/SteinmetzAnalyses/Images/Plots').mkdir(parents=True, exist_ok=True)\n plt.savefig('/Users/sophiabatchelor/Code/SteinmetzAnalyses/Images/Plots/Plotof{}.png'.format(assigned_region))\n plt.show()", "_____no_output_____" ] ], [ [ "## Relationship between spiking and behaviour", "_____no_output_____" ] ], [ [ "# analyses for Lederberg : session 11\n\nsession_11 = steinmetz_data[11]\n\ndt = session_11['bin_size'] # 10ms bins\nNT = session_11['spks'].shape[-1]\n\n# ax = plt.subplot(1,5,1)\nresponse = session_11['response'] # right - nogo - left (-1, 0, 1)\nvis_right = session_11['contrast_right'] # 0 - low - high\nvis_left = session_11['contrast_left'] # 0 - low - high\navg_gocue = (np.mean(session_11[\"gocue\"]))\n\n\nplt.plot(dt * np.arange(NT), 1 / dt * session_11['spks'][:,response>=0].mean(axis=(0,1))) # left responses\nplt.plot(dt * np.arange(NT), 1 / dt * session_11['spks'][:,response<0].mean(axis=(0,1))) # right responses\nplt.plot(dt * np.arange(NT), 1 / dt * session_11['spks'][:,vis_right>0].mean(axis=(0,1))) # right stimuli\nplt.plot(dt * np.arange(NT), 1 / dt * 
session_11['spks'][:,vis_right==0].mean(axis=(0,1))) # no right stimulus\nplt.axvline(avg_gocue, color='black')\n\nplt.title(\"Session 11 Spike Frequency\")\nplt.xlabel(\"Time (sec)\") # change axis labels if you need reminders\nplt.ylabel(\"Firing rate (Hz)\")\nplt.legend(['left resp', 'right resp', 'right stim', 'no right stim', 'stimuli onset'], fontsize=14)\npathlib.Path('/Users/sophiabatchelor/Code/SteinmetzAnalyses/Images/Plots/ResponseSpikeAnalyses').mkdir(parents=True, exist_ok=True)\nplt.savefig('/Users/sophiabatchelor/Code/SteinmetzAnalyses/Images/Plots/ResponseSpikeAnalyses/session_11_spikes.png')\n\nplt.show()\n", "_____no_output_____" ], [ "regions = [\"vis ctx\", \"thal\", \"hipp\", \"other ctx\", \"midbrain\", \"basal ganglia\", \"cortical subplate\", \"other\"]\nbrain_groups = [[\"VISa\", \"VISam\", \"VISl\", \"VISp\", \"VISpm\", \"VISrl\"], # visual cortex\n                [\"CL\", \"LD\", \"LGd\", \"LH\", \"LP\", \"MD\", \"MG\", \"PO\", \"POL\", \"PT\", \"RT\", \"SPF\", \"TH\", \"VAL\", \"VPL\", \"VPM\"], # thalamus\n                [\"CA\", \"CA1\", \"CA2\", \"CA3\", \"DG\", \"SUB\", \"POST\"], # hippocampal\n                [\"ACA\", \"AUD\", \"COA\", \"DP\", \"ILA\", \"MOp\", \"MOs\", \"OLF\", \"ORB\", \"ORBm\", \"PIR\", \"PL\", \"SSp\", \"SSs\", \"RSP\",\" TT\"], # non-visual cortex\n                [\"APN\", \"IC\", \"MB\", \"MRN\", \"NB\", \"PAG\", \"RN\", \"SCs\", \"SCm\", \"SCig\", \"SCsg\", \"ZI\"], # midbrain\n                [\"ACB\", \"CP\", \"GPe\", \"LS\", \"LSc\", \"LSr\", \"MS\", \"OT\", \"SNr\", \"SI\"], # basal ganglia \n                [\"BLA\", \"BMA\", \"EP\", \"EPd\", \"MEA\"] # cortical subplate\n                ]", "_____no_output_____" ], [ "num_good_areas = 4 # only the top 4 regions are in this particular mouse\nneurons = len(session_11['brain_area']) # gives the number of neurons\ngood_areas = num_good_areas * np.ones(neurons, ) # note: last brain region is \"other\"\n\nfor i in range(num_good_areas):\n    good_areas[np.isin(session_11['brain_area'], brain_groups[i])] = i # assign a number to each region", "_____no_output_____" ], [ "# Neural response to visual stimuli\n\nfor i in range(num_good_areas):\n    fig, axs = plt.subplots(sharey = True)\n    plt.plot(1 / dt * session_11['spks'][good_areas == i][:,np.logical_and(vis_left == 0, vis_right > 0)].mean(axis=(0,1)))\n    plt.plot(1 / dt * session_11['spks'][good_areas == i][:,np.logical_and(vis_left == 0, vis_right == 0)].mean(axis=(0,1)))\n    plt.plot(1 / dt * session_11['spks'][good_areas == i][:,np.logical_and(vis_left > 0, vis_right == 0)].mean(axis=(0,1)))\n    plt.plot(1 / dt * session_11['spks'][good_areas == i][:,np.logical_and(vis_left > 0, vis_right > 0)].mean(axis=(0,1)))\n    \n    fig.suptitle('{} response to visual stimuli'.format(regions[i]))\n    plt.xlabel('Time (ms)')\n    plt.ylabel('Spike rate (Hz)')\n    # legend entries match the plotting order above: right-only, none, left-only, both\n    plt.legend(['right cue only', 'no cue', 'left cue only', 'both cues'], fontsize=12)\n    plt.savefig('/Users/sophiabatchelor/Code/SteinmetzAnalyses/Images/Plots/ResponseSpikeAnalyses/session11_{}_vep.png'.format(regions[i]))", "_____no_output_____" ] ], [ [ "## Now let's model", "_____no_output_____" ] ], [ [ "print(emoji.emojize(':bug: :bug: :bug: :bug: :bug: :bug: :bug: :bug: :bug: :bug: :bug: :bug: :bug: :bug: :bug: :bug: :bug: :bug:')) ", "🐛 🐛 🐛 🐛 🐛 🐛 🐛 🐛 🐛 🐛 🐛 🐛 🐛 🐛 🐛 🐛 🐛 🐛\n" ], [ "from sklearn.linear_model import LogisticRegression\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score", "_____no_output_____" ], [ "session_data = steinmetz_data[11]\nnum_timebins = session_data['spks'].shape[2]\nnum_trials = session_data['spks'].shape[1]\nmove = session_data['response'] # right - nogo - left (-1, 0, 1)\nregions = np.unique(session_data['brain_area'])\nspikes_in_a_region = {}\nfor region in regions:\n    spikes_in_a_region[region] = session_data['spks'][np.where(session_data['brain_area']==region)]", "_____no_output_____" ],
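[ "# Quick sketch (added for illustration, using move from the cell above):\n# check the class balance the decoder below will face\nprint(np.bincount((move != 0).astype(int)))   # trials per class: [no-go, moved]\nprint(np.mean(move != 0))                     # accuracy of always predicting a movement", "_____no_output_____" ],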
[ "session_spikes = session_data['spks']\nquick_info = session_spikes.shape\nprint(\"Number of neurons recorded in all sessions: {}, Number of Trials: {}, Number of timebins: {}\".format(quick_info[0], quick_info[1], quick_info[2]))\n", "Number of neurons recorded in all sessions: 698, Number of Trials: 340, Number of timebins: 250\n" ], [ "Y = (move != 0).astype(int) # 1 = the mouse moved (left or right), 0 = no-go\nY # 1D array", "_____no_output_____" ], [ "target_regions = spikes_in_a_region.keys()\nscores = np.zeros((len(target_regions),num_timebins))\nfor i, (_, spikes) in enumerate(spikes_in_a_region.items()):\n    for t in range(num_timebins):\n        X = spikes[:,:,t].T   # (trials, neurons): one sample per trial\n", "_____no_output_____" ], [ "X.shape", "_____no_output_____" ], [ "# check if the function is actually reading in the files okay\nalldata = np.array([])\nfor j in range(2):\n    alldata = np.hstack((alldata, np.load('/Users/sophiabatchelor/Code/SteinmetzAnalyses/steinmetz_part%d.npz'%(j+1), allow_pickle=True)['dat']))\ndata = alldata[11]\nprint(data.keys())", "dict_keys(['spks', 'wheel', 'pupil', 'response', 'response_time', 'bin_size', 'stim_onset', 'contrast_right', 'contrast_left', 'brain_area', 'feedback_time', 'feedback_type', 'gocue', 'mouse_name', 'date_exp', 'trough_to_peak', 'active_trials', 'contrast_left_passive', 'contrast_right_passive', 'spks_passive', 'pupil_passive', 'wheel_passive', 'prev_reward', 'ccf', 'ccf_axes', 'cellid_orig', 'reaction_time', 'face', 'face_passive', 'licks', 'licks_passive'])\n" ], [ "## BUGS ALL THE WAY DOWN \n\n# Note: data isn't the same shape \n\n### nextsteps: ###\n# - strip back the functions \n# - reshape or Transpose data", "_____no_output_____" ], [ "def prepare_data(session=11):\n    model_data = np.array([])\n    for j in range(2):\n        model_data = np.hstack((model_data, np.load('/Users/sophiabatchelor/Code/SteinmetzAnalyses/steinmetz_part%d.npz'%(j+1), allow_pickle=True)['dat']))\n    data = model_data[session]\n    num_trials = data['spks'].shape[1]\n    n_timebins = data['spks'].shape[2]\n    move = data['response'] # right - nogo - left (-1, 0, 1)\n    labels = (move != 0).astype(int) # decode movement vs. no-go\n    regions = np.unique(data['brain_area'])\n    spikes_per_region = dict()\n    for region in regions:\n        spikes_per_region[region] = data['spks'][np.where(data['brain_area']==region)]\n    return spikes_per_region, labels, n_timebins", "_____no_output_____" ], [ "def simple_decoder(session=11):\n    model = LogisticRegression(penalty='l2',multi_class='ovr',solver='liblinear')\n    spikes_per_region, Y, n_timebins = prepare_data(session=session)\n    regions = spikes_per_region.keys()\n    scores = np.zeros((len(regions),n_timebins))\n    for region,(_, spikes) in enumerate(spikes_per_region.items()):\n        for t in range(n_timebins):\n            X = spikes[:,:,t].T   # (trials, neurons): samples x features\n            score = cross_val_score(model, X, Y, cv=5)\n            scores[region,t] = np.mean(score)\n    return scores", "_____no_output_____" ], [ "def plot_scores(scores,session,save_name):\n    spikes_per_region, _, n_timebins = prepare_data(session=session)\n    regions = spikes_per_region.keys()\n    \n    fig = plt.figure(figsize=[10,5])\n    contour = plt.contourf(scores)\n    cb = fig.colorbar(contour, shrink = 0.5, aspect = 5)\n    cb.set_label('Accuracy')\n    tick_marks = np.arange(len(regions))\n    plt.yticks(tick_marks, regions)\n    plt.xticks(np.arange(0,n_timebins,20), np.arange(0,n_timebins*10,200))\n    plt.ylabel('Brain area')\n    plt.xlabel('Time (ms)')\n    plt.tight_layout()\n    plt.show()\n# TODO create a dir in Images for the plot to be saved in \n#    fig.savefig(<path> + save_name, format='png')", "_____no_output_____" ],
[ "if __name__==\"__main__\":\n    scores = simple_decoder(session = 12)\n    plot_scores(scores,12,'scores_s12.png')", "_____no_output_____" ], [ "def plot_all_sessions():\n    n_sessions = 39\n    for i in range(n_sessions):\n        scores = simple_decoder(session = i)\n        plot_scores(scores,i,'scores_s%d.png'%i)\n    \nif __name__==\"__main__\":\n    scores = simple_decoder(session=12)\n    plot_scores(scores,12,'scores_s12.png')\n    # plot_all_sessions()", "_____no_output_____" ], [ "\nfor trial in range(num_trials): # this will run 340 times \n# find the avg spike per time bin \n    \n    # get the avg spk_per_time_bin\n    list_of_spikes_in_a_trial = []\n    list_spk_avg_per_trial= []\n    for t in range(num_timebins):\n        spikes_in_a_trial = session_spikes[:, trial, t]   # all neurons, this trial, this bin\n        list_of_spikes_in_a_trial.append(spikes_in_a_trial)\n        trial_spk_avg = np.mean(spikes_in_a_trial)\n        list_spk_avg_per_trial.append(trial_spk_avg)", "_____no_output_____" ], [ "list_of_spikes_in_a_trial = []\nlist_spk_avg_per_trial= []\n\nfor t in range(num_timebins):\n    spikes_in_a_trial = session_spikes[:, :, t]   # all neurons, all trials, one timebin\n    list_of_spikes_in_a_trial.append(spikes_in_a_trial)\n    trial_spk_avg = np.mean(spikes_in_a_trial)\n    list_spk_avg_per_trial.append(trial_spk_avg)", "_____no_output_____" ], [ "len(list_of_spikes_in_a_trial)", "_____no_output_____" ], [ "num_trials", "_____no_output_____" ], [ "avg_spks_per_timebin = []\n\nfor t in range(num_timebins):\n    spikes_in_bin = session_spikes[:,:,t]\n    avg_per_bin = np.mean(spikes_in_bin)\n    avg_spks_per_timebin.append(avg_per_bin)", "_____no_output_____" ], [ "avg_spks_per_timebin", "_____no_output_____" ], [ "test_set = session_spikes   # scratch alias (assumption: the exploration below uses this session's spikes)\nfor t in range(num_timebins):\n    test_spks = test_set[t,t,:]   # neuron t, trial t, all bins; only the last iteration survives\ntest_spks", "_____no_output_____" ], [ "print(test_spks.ndim)\nprint(test_spks.shape)", "_____no_output_____" ], [ "for t in range(num_timebins):\n    test_bin_piece = test_set[:,:,t]\ntest_bin_piece", "_____no_output_____" ], [ "print(test_bin_piece.ndim)\nprint(test_bin_piece.shape)", "_____no_output_____" ], [ "hat1 = test_set[0,0,:]\nhat1\n\n# 250 results -> these are the spikes in session", "_____no_output_____" ], [ "hat2 = test_set[1,1,:]\nhat2", "_____no_output_____" ], [ "hat3 = test_set[2,2,:]\nhat3", "_____no_output_____" ], [ "np.mean(hat1)", "_____no_output_____" ], [ "np.mean(hat2)", "_____no_output_____" ], [ "np.mean(hat3)", "_____no_output_____" ], [ "list_the_spikes_in_a_session = []\nlist_bin_means = []\n\nfor t in range(num_timebins):\n    the_spikes_in_a_session = test_set[t,t,:]   # neuron t, trial t, all timebins\n    list_the_spikes_in_a_session.append(the_spikes_in_a_session)\n    avg_per_bin = np.mean(the_spikes_in_a_session)\n    list_bin_means.append(avg_per_bin)", "_____no_output_____" ], [ "print(list_the_spikes_in_a_session)", "_____no_output_____" ], [ "len(list_the_spikes_in_a_session)", "_____no_output_____" ], [ "Lederb = table.iloc[11]\nLederb", "_____no_output_____" ], [ "list_bin_means", "_____no_output_____" ] ] ]
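The scratch loops above collapse into single vectorized calls (a sketch; session_spikes has shape (neurons, trials, timebins), as printed earlier):

avg_spks_per_timebin = session_spikes.mean(axis=(0, 1))   # one value per timebin
avg_spks_per_trial = session_spikes.mean(axis=(0, 2))     # one value per trial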
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a7bb39c4cef08bec1b7e1a5cc513f0f0e559826
988,621
ipynb
Jupyter Notebook
jupyter/tutorials/tf_kmeans_miklgr500.ipynb
ivan-magda/mlcourse_open_homeworks
bc67fe6b872655e8e5628ec14b01fde407c5eb3c
[ "MIT" ]
1
2018-10-24T08:35:29.000Z
2018-10-24T08:35:29.000Z
jupyter/tutorials/tf_kmeans_miklgr500.ipynb
ivan-magda/mlcourse_open_homeworks
bc67fe6b872655e8e5628ec14b01fde407c5eb3c
[ "MIT" ]
null
null
null
jupyter/tutorials/tf_kmeans_miklgr500.ipynb
ivan-magda/mlcourse_open_homeworks
bc67fe6b872655e8e5628ec14b01fde407c5eb3c
[ "MIT" ]
3
2019-10-03T22:32:24.000Z
2021-01-13T10:09:22.000Z
864.935258
385,226
0.951707
[ [ [ "<center>\n<img src=\"../../img/ods_stickers.jpg\">\n## Открытый курс по машинному обучению\n<center>Автор материала: Michael Kazachok (@miklgr500)", "_____no_output_____" ], [ "# <center>Другая сторона tensorflow:KMeans", "_____no_output_____" ], [ "## <center>Введение", "_____no_output_____" ], [ "<p style=\"text-indent:20px;\"> Многие знают <strong>tensorflow</strong>, как одну из лучших библиотек для обучения нейронных сетей, но в последнее время tensorflow довольно сильно вырос. Появились новые <a href='https://www.tensorflow.org/programmers_guide/estimators'>Estimators</a>, которые более удобны, чем старая парадигма, являющаяся фундаментом для новой.</p>\n<p style=\"text-indent:20px;\"> На сайте <a href = 'https://www.tensorflow.org/'>tensorflow</a> будет хорошая инструкция по установке под определенную операционную ситему и возможностях использование <a href = 'https://ru.wikipedia.org/wiki/GPGPU'>GPGPU</a>.Я не буду грузить данную работу особенностями \"кухни\" tensorflow (поэтому советую почитать хотябы основы в <a href='https://www.tensorflow.org/tutorials/'>официальном тьюториале</a> и посмотреть <a href='https://github.com/aymericdamien/TensorFlow-Examples'>TensorFlow Tutorial and Examples for Beginners with Latest APIs</a>; там же есть примеры, которые помогут в дальнейшем в изучении нейронных сетей), а я пройдусь по уже прошитым в этой либе алгоритмам крастеризации(а их фактически пока только два).</p>\n<p style=\"text-indent:20px;\"> При этом будет использоваться набор данных с Kaggel для соревнования <a href = 'https://www.kaggle.com/chicago/chicago-taxi-rides-2016'>Chicago Taxi Rides 2016</a>, который использовался в одной из домашек (<span style='color:green'>рекомендую использовать не более двух месяцев</span>).</p>\n<p style=\"text-indent:20px;\"> Применение простейшего алгоритма кластеризации в tensorflow будет сопроваждаться рассмотрением вопросов изящной визуализации (которую я увидел этим летом на соревнований Kaggle <a href = 'https://www.kaggle.com/c/nyc-taxi-trip-duration'>New York City Taxi Trip</a>), представленой <a href = 'https://www.kaggle.com/drgilermo'>DrGuillermo</a> и <a href = 'https://www.kaggle.com/maheshdadhich'>BuryBuryZymon</a> в их работах <a href = 'https://www.kaggle.com/drgilermo/dynamics-of-new-york-city-animation'>Dynamics of New York city - Animation</a> и <a href = 'https://www.kaggle.com/maheshdadhich/strength-of-visualization-python-visuals-tutorial'>Strength of visualization-python visuals tutorial</a> на соревновании.</p>", "_____no_output_____" ], [ "<p style=\"text-indent:20px;\"><i>P.S. 
На написание данного тьюториала автора сподвигло довольно плохая освещенность возможностей tensorflow для создания уже довольно хорошо всем известных простых алгоритмов машинного обучения, которые для определенных задачах могут быть более эфективны, чем сложные алгоритмы машинного обучения.</i></p>", "_____no_output_____" ], [ "## <center>Подключение используемых в работе библиотек и загрузка данных", "_____no_output_____" ] ], [ [ "FIG_SIZE = (12,12)\nPATH_DATA_JSON = '../../data/column_remapping.json'\nPATH_DATA_CSV = '../../data/chicago_taxi_trips_2016_*.csv'\nGIF_PATH = '../../img/animation.gif'\nKMEANS_GIF_PATH='../../img/kmeans_animation.gif'\nNUM_CLUSTERS = 5\nBATCH_SIZE = 5\nNUM_STEPS = 50\nLON_CONST = -87.623177\nLAT_CONST = 41.881832\nLON_ANI_CENTER = [-87.73, -87.60]\nLAT_ANI_CENTER = [41.85, 42.00]", "_____no_output_____" ], [ "import json\nimport pandas as pd\nfrom glob import glob\nfrom joblib import Parallel, delayed\n\nimport folium\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib import animation\nfrom matplotlib.patches import Ellipse\nfrom IPython.display import HTML\nplt.rcParams.update({'figure.max_open_warning': 0})\n\nimport numpy as np\nimport tensorflow as tf\nfrom geopy.geocoders import Nominatim\n\nimport io\nimport base64\nfrom dateutil import parser\n\n%load_ext watermark", "_____no_output_____" ] ], [ [ "Версии основных библиотек и параметры системы.", "_____no_output_____" ] ], [ [ "%watermark -v -m -p numpy,pandas,matplotlib,tensorflow -g", "CPython 3.5.2\nIPython 6.1.0\n\nnumpy 1.13.3\npandas 0.20.3\nmatplotlib 2.1.0\ntensorflow 1.4.0-rc0\n\ncompiler : GCC 5.4.0 20160609\nsystem : Linux\nrelease : 4.10.0-37-generic\nmachine : x86_64\nprocessor : x86_64\nCPU cores : 4\ninterpreter: 64bit\nGit hash : f403a4676b99fce59d623f5adaf83f7af6b384ba\n" ] ], [ [ "Открываем данные за два первых месяца. 
Будте внимательны со ссылками на данные.", "_____no_output_____" ] ], [ [ "#ядро которое будем использовать \n#для загруски и преобработки данных за один месяц\ndef preproc_kernel(path):\n with open(PATH_DATA_JSON) as json_file:\n column_remapping = json.load(json_file)\n df = pd.read_csv(path)\n # в дальнейшем понадобяться только геоданные\n # и время начала поездки\n df = df.loc[:, [\n 'trip_start_timestamp',\n 'pickup_latitude',\n 'pickup_longitude',\n 'dropoff_latitude',\n 'dropoff_longitude']].dropna()\n geo_labels = ['pickup_latitude',\n 'pickup_longitude',\n 'dropoff_latitude',\n 'dropoff_longitude']\n for g in geo_labels:\n df[g] = df[g].apply(lambda x: float(column_remapping[g].get(str(int(x)))))\n return df\n\n\ndataset_files = sorted(glob(PATH_DATA_CSV))\n# выполняем загрузку данных параллельно\n# на двух ядрах, каждому по одному файлу\ndfs = Parallel(n_jobs=2)(delayed(preproc_kernel)(path) for path in dataset_files)\n# склеиваем данные\ndf = pd.concat(dfs, ignore_index=True)\ndf.head()", "_____no_output_____" ] ], [ [ "## <center> Визуализация данных", "_____no_output_____" ], [ "Произведем предварительную визуализацию всех гео данных и выявим их границы.", "_____no_output_____" ] ], [ [ "# соединяем гео данные для точек посадки и точек высадки\nlongitude = list(df.pickup_longitude)+list(df.dropoff_longitude)\nprint('max_long:'+str(max(longitude)))\nprint('min_long:'+str(min(longitude)))\nlatitude = list(df.pickup_latitude)+list(df.dropoff_latitude)\nprint('max_lat:'+str(max(latitude)))\nprint('min_lat:'+str(min(latitude)))\n\nloc_df = pd.DataFrame()\nloc_df['longitude'] = longitude\nloc_df['latitude'] = latitude", "max_long:-87.534902901\nmin_long:-87.913624596\nmax_lat:42.021223593\nmin_lat:41.660136051\n" ], [ "#производим визуализацию объединенных гео данных\nfig, ax = plt.subplots(1,1, figsize = FIG_SIZE)\nplt.plot(longitude, \n latitude, \n '.', \n color = 'orangered',\n markersize = 1.5, \n axes = ax, \n figure = fig\n )\nax.set_axis_off()\nplt.show();", "_____no_output_____" ] ], [ [ "<p style=\"text-indent:20px;\">Мало что можно сказать про количество кластеров из графика выше. Но если вывести рапределение по широте и долготе, то картина немного прояснится.</p>", "_____no_output_____" ] ], [ [ "fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, figsize=FIG_SIZE)\n\nsns.distplot(loc_df['longitude'], bins=300, kde=False, ax=ax1)\nsns.distplot(loc_df['latitude'], bins=300, kde=False, ax=ax2)\nplt.show();", "_____no_output_____" ] ], [ [ "<p style=\"text-indent:20px;\">Из графиков выше видно, что наибольший трафик приходится практически на центр города. При этом стоит отметить, наличее довольно сильно выделяющегося трафика на долготе -87.90, а по долготе правея центра выделятся три центра с ярко выраженным трафиков. Таким образом кроме одного основного яровыделяющего по трафику центра есть еще как миниму четыре центра, которые можно выделить в отдельный кластер. В итоге можно выделить пять кластеров, которые имеют ярковыраженый трафик.</p> ", "_____no_output_____" ], [ "\n## <center>Kmean в tensorflow", "_____no_output_____" ], [ "<p style=\"text-indent:20px;\">Пожалуй это один из самых востребованных алгоритмов кластеризации на на данный момент. 
Не думаю, что тут стоит излагать теорию (учитывая, что она затрагивалась в <a href='https://habrahabr.ru/company/ods/blog/325654/'>лекции курса</a>), если кто-то хочет почитать что-то еще по данному алгоритму и по кластеризации в целом, то я пожалуй могу посоветовать <a href='http://www.machinelearning.ru/wiki/images/2/28/Voron-ML-Clustering-slides.pdf'>лекции К.В.Воронцова</a>.</p>", "_____no_output_____" ] ], [ [ "# формируем массив с данными в нужном формате\n# т.е. формируем пары [lon, lat]\n# Для правильной работы алгоритма\n# неообходимо омязательно избавиться от\n# постоянной компаненты\ndata = [[(lon-LON_CONST), (lat-LAT_CONST)] for lon, lat in zip(longitude, latitude)]\ndata = np.array(data)", "_____no_output_____" ] ], [ [ "<p style=\"text-indent:20px;\">В качестве основы выберем уже прошитый в tensorflow алгоритм <a href='https://www.tensorflow.org/versions/master/api_docs/python/tf/contrib/factorization/KMeans'>KMeans</a>(<a href='https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/contrib/factorization/python/ops/clustering_ops.py'>люблю открытый код</a>). Те кто разобрал открытый код, мог заметить, что из большого набора функций вызвать можем только <i>training_graph(self)</i>. Обратите внимание возвращается ли в вашей версии tensorflow данная функция переменную <i>cluster_centers_var</i>(в 1.3 она не возвращается).</p>", "_____no_output_____" ] ], [ [ " def KMeans_clustering(num_clusters=NUM_CLUSTERS, flag_print=True):\n # создаем placeholder X\n # подставляя его вместо каких-то знаений\n # мы говорим вычислительному графу\n # что эти значения будут предоставлены потом: \n # в процессе обучения и/или инициализации\n X = tf.placeholder(tf.float32, shape=[None, 2])\n\n # производим построение вычислительного графа для KMeans\n kmeans = tf.contrib.factorization.KMeans(\n inputs=X,\n num_clusters=num_clusters,\n initial_clusters=\"kmeans_plus_plus\",\n mini_batch_steps_per_iteration=BATCH_SIZE,\n random_seed=29,\n use_mini_batch=True\n )\n \n (all_scores,cluster_idx, scores,cluster_centers_initialized,\\\n cluster_centers_var,init_op,train_op) = kmeans.training_graph()\n \n # т.к. изначально возвращается tuple\n # то берем только первый его член\n cluster_idx = cluster_idx[0]\n # производим расчет средней дистанции \n # точек до своего кластера\n avg_distance = tf.reduce_mean(scores)\n\n # создание сессии и инициальзация\n init_vars = tf.global_variables_initializer()\n sess = tf.Session()\n sess.run(init_vars)\n sess.run(init_op, feed_dict={X: data})\n\n # пошагово обучаем модель\n # получая на каждом шаге\n # d:среднюю дистанцию от точки \n # до центра своего кластера\n #----------------------------\n # задаем критерии остановки\n\n for i in range(1,NUM_STEPS+1):\n _, d, idx, cl_c = sess.run([train_op, \n avg_distance,\n cluster_idx,\n cluster_centers_var],\n feed_dict={X: data}\n )\n \n if (i%10==0)&(flag_print):\n print('Step %i, Average Distance %.8f'%(i, d))\n sess.close()\n return d,idx,cl_c", "_____no_output_____" ] ], [ [ "<p style=\"text-indent:20px;\">Визуализируем работу алгоритма, произведя инициализацию всех кластеров в координате [LON_CONST, LAT_CONST], являющеся центром города.</p>", "_____no_output_____" ] ], [ [ "# сделаем анимацию обучения\nnum_clusters = 8\n\n# массив для инициализации кластеров\n# в точке [LON_CONST, LAT_CONST], но \n# т.к. 
у нас все данные смещенны на \n# значение данной координаты,\n# то инициализацию необходимо провести \n# в точке [0, 0]\ninit_cl = np.array([[0, 0] for i in range(num_clusters)],\n dtype=np.float32\n )\nX = tf.placeholder(tf.float32, shape=[None, 2])\n# производим построение вычислительного графа для KMeans\nkmeans = tf.contrib.factorization.KMeans(\n inputs=X,\n num_clusters=num_clusters,\n initial_clusters=init_cl,\n mini_batch_steps_per_iteration=2,\n random_seed=29,\n use_mini_batch=False\n)\n \n(all_scores,cluster_idx, scores,cluster_centers_initialized,\\\n cluster_centers_var,init_op,train_op) = kmeans.training_graph()\n# т.к. изначально возвращается tuple\n# то берем только первый его член\ncluster_idx = cluster_idx[0]\navg_distance = tf.reduce_mean(scores)\n\n# создание сессии и инициальзация\ninit_vars = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init_vars)\nsess.run(init_op, feed_dict={X: data})\nfig, ax = plt.subplots(1,1, figsize = FIG_SIZE)\n# задаем функцию, которую передадим в animation.FuncAnimation\n# эта функция будет производить просчет полученого графика\n# на каждом шагу, но так как mini_batch_steps_per_iteration=2\n# то изменение будут каждые 2 шага, всего шагов будет 10\n# их мы непосредственно будем задавать в FuncAnimation\n# в виде массива и FuncAnimation пошагово будет передовать\n# заданные значения в animate_kmeans\ndef animate_kmeans(step):\n _, d, idx, cl_c = sess.run([train_op, \n avg_distance,\n cluster_idx,\n cluster_centers_var],\n feed_dict={X: data}\n )\n # для упрощения работы с полученными данными после обучения\n # создается DataFrame, который в конце кода будет удален\n # данное решение может быть не совсем оптимально\n # оно просто упрощает жизнь вашему слуге =)\n loc_df['labels'] = idx\n cl_df = pd.DataFrame()\n cl_df['longitude'] = cl_c[:,0]+LON_CONST\n cl_df['latitude'] = cl_c[:,1]+LAT_CONST\n cl_df['labels'] = cl_df.index\n # обязательно чистим предыдущий график\n ax.clear()\n ax.set_title('Step: '+str(step))\n for l in cl_df['labels']:\n ax.plot(loc_df.loc[loc_df['labels'] == l, 'longitude'], \n loc_df.loc[loc_df['labels'] == l, 'latitude'], \n '.',\n markersize = 1.5\n )\n ax.plot(cl_df.loc[cl_df['labels'] == l, 'longitude'], \n cl_df.loc[cl_df['labels'] == l, 'latitude'], \n 'ro'\n )\n ax.annotate(s=str(l),\n xy=(cl_df.loc[cl_df['labels'] == l, 'longitude'], \n cl_df.loc[cl_df['labels'] == l, 'latitude'])\n )\n \n ax.set_axis_off()\n del cl_df\n \nani = animation.FuncAnimation(fig,\n animate_kmeans,\n list(range(0, 20)),\n interval=500\n )\n# производим закрытие отрисованных графиков\nplt.close()\n# дириктори сохранения гифки\ngif_path = KMEANS_GIF_PATH\n# сохранение гифки\nani.save(gif_path,\n writer='imagemagick',\n fps=1\n )\n# открываем сохраненную гифку и производим ее дешифрование\n# для дальнейшего URL и подстановки их в HTML\nvideo = io.open(gif_path,\n 'r+b'\n ).read()\nencoded = base64.b64encode(video)\n# производим отрисовку анимации в notebook\nHTML(data='''<img src=\"data:image/gif;base64,{0}\"type=\"gif\"/>'''.format(\n encoded.decode('ascii'))) ", "_____no_output_____" ] ], [ [ "<p style=\"text-indent:20px;\">Видно что обновление происходит каждые 2 шага за счет установки mini_batch_steps_per_iteration=2. Вы можете поиграться с кодом выше! 
Выставте другую инициализацию(\"kmeans_plus_plus\",\"random\") или поиграйтесь с параметрами для mini_batch, а можно и вовсе изменить количество кластеров!</p>", "_____no_output_____" ], [ "<p style=\"text-indent:20px;\">Найдем оптимальное число кластеров по методу, который был предложен на лекции,а пока идут вычисления можно заварить чашечку кофе и изучить новый алгоритм =)<p>", "_____no_output_____" ] ], [ [ "n_cluster = range(1,15,1)\navg_distance = []\nfor i in n_cluster:\n d,idx,cl_c = KMeans_clustering(num_clusters=i, flag_print=False)\n avg_distance.append(d)", "_____no_output_____" ], [ "plt.plot([i for i in n_cluster], avg_distance, color = 'seagreen')\nplt.xlabel('number of cluster')\nplt.ylabel('avg_distance')\nplt.title('Optimal Number Of Cluster')\nplt.show();", "_____no_output_____" ] ], [ [ "<p style=\"text-indent:20px;\">Из графика видно, что ничего не видно=). Опять гадаем=) Я бы взять 4 кластера, и это довольно неплохо согласуется с предыдущей оценкой, поэтому возмем 5 кластеров(в данном случае лучше взять большее число, т.о. получится более детальная картина трафика).</p>", "_____no_output_____" ] ], [ [ "NUM_CLUSTERS = 5\n \nd,idx,cl_c = KMeans_clustering(num_clusters=NUM_CLUSTERS, flag_print=True)", "Step 10, Average Distance 0.00053107\nStep 20, Average Distance 0.00052832\nStep 30, Average Distance 0.00052820\nStep 40, Average Distance 0.00052820\nStep 50, Average Distance 0.00052820\n" ] ], [ [ "<p style=\"text-indent:20px;\">Добавим метки кластеров в loc_df, и создадим новый DataFrame с параметрами (широта, долгота и метка кластера для каждого кластера).</p>", "_____no_output_____" ] ], [ [ "loc_df['labels'] = idx\ncl_df = pd.DataFrame()\ncl_df['longitude'] = cl_c[:,0]+LON_CONST\ncl_df['latitude'] = cl_c[:,1]+LAT_CONST\ncl_df['labels'] = cl_df.index\ncl_df.tail()", "_____no_output_____" ] ], [ [ "## <center> Визуализация полученых кластеров", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(1,1, figsize = FIG_SIZE)\nfor l in cl_df['labels']:\n plt.plot(loc_df.loc[loc_df['labels'] == l, 'longitude'], \n loc_df.loc[loc_df['labels'] == l, 'latitude'], \n '.',\n markersize = 1.5, \n axes = ax, \n figure = fig\n )\n plt.plot(cl_df.loc[cl_df['labels'] == l, 'longitude'], \n cl_df.loc[cl_df['labels'] == l, 'latitude'], \n 'ro', \n axes = ax, \n figure = fig\n )\n ax.annotate(s=str(l),\n xy=(cl_df.loc[cl_df['labels'] == l, 'longitude'], \n cl_df.loc[cl_df['labels'] == l, 'latitude'])\n )\n \nax.set_axis_off()\nplt.show();", "_____no_output_____" ], [ "# посмотрим где наши кластеры расположились на карте\nchikago_map = folium.Map(location=[LAT_CONST, LON_CONST], \n zoom_start=10,\n tiles='OpenStreetMap'\n )\n# выставляем маркеры на карту Чикаго\nfor lon, lat in zip(cl_df['longitude'], cl_df['latitude']):\n folium.Marker(location=[lat, lon]).add_to(chikago_map)\nchikago_map", "_____no_output_____" ] ], [ [ "<p style=\"text-indent:20px;\">Можно заметить, что две самых удаленных от скопления мест посадок и высодок центроид кластеров находяться ровно около аэропортов(1,3), одна принадлежит северным жилым зонам Чикаго(2), а две центроиды можно отнести на деловой и культурный части (4,0) Чикаго.</p>\n<p style=\"text-indent:20px;\">Может показаться странным, что на южные жилые зоны Чикаго нет ярко выраженной центроиды, но если больше узнать об этом городе, то станет понятно, что это не так уж и странно. 
Южные кварталы Чикаго - это мексиканские и ирландские районы, в которых уровень жизни ниже северной части Чикаго.</p>", "_____no_output_____" ], [ "## <center>Визуализация трафика между центрами", "_____no_output_____" ], [ "<p style=\"text-indent:20px;\">Для прогноза трафика между кластерами по часам необходимо: выделить час посадки и выставить метки принадлежности определенному кластеру для мест посадки и высадки.</p>", "_____no_output_____" ] ], [ [ "df['pickup_hour'] = df['trip_start_timestamp'].apply(lambda x: parser.parse(x).hour)\ndf['pickup_cluster'] = loc_df.loc[:len(df)-1,'labels'].values\ndf['dropoff_cluster'] = loc_df.loc[len(df):, 'labels'].values", "_____no_output_____" ] ], [ [ "<p style=\"text-indent:20px;\">Начнем делать красоту (т.е. анимацию трафика между кластерами). Тот кто хочет получше разобраться с анимацией в matplotlib можно почитать документацию с <a href='https://matplotlib.org/api/animation_api.html'>официального сайта</a>.</p>", "_____no_output_____" ] ], [ [ "def trafic_animation(lon_ani_lim=None, lat_ani_lim=None, strong=6):\n # передовая пределы возможно ограничить зону \n # изображения анимации\n # так же немаловажен параметр strong\n # который является маштабирующим коэффициентом\n # и влияет на ширину стрелок\n if (lon_ani_lim==None)|(lat_ani_lim==None):\n lim_cl_df = cl_df\n elif (len(lon_ani_lim)!=2)|(len(lat_ani_lim)!=2):\n lim_cl_df = cl_df\n else:\n lim_cl_df = cl_df[\n ((cl_df['longitude']>lon_ani_lim[0])&(cl_df['longitude']<lon_ani_lim[1]))&\n ((cl_df['latitude']>lat_ani_lim[0])&(cl_df['latitude']<lat_ani_lim[1]))\n ]\n fig, ax = plt.subplots(1,1, figsize = FIG_SIZE)\n \n \n # функция, которая будет передоваться в animation.FuncAnimation\n def animate(hour):\n # чистим все что было отрисовано ранее \n ax.clear()\n # отрисовываем все заново\n ax.set_title('Absolute Traffic - Hour' + str(int(hour)) + ':00')\n plt.figure(figsize = FIG_SIZE)\n # статическая часть, она будет неизменна\n # но так как мы чистим все перед этим\n # то нам необходимо будет все отрисовать заново\n for l in lim_cl_df['labels']:\n ax.plot(loc_df.loc[loc_df['labels'] == l, 'longitude'], \n loc_df.loc[loc_df['labels'] == l, 'latitude'], \n '.',\n markersize = 1.5\n )\n ax.plot(cl_df.loc[cl_df['labels'] == l, 'longitude'], \n cl_df.loc[cl_df['labels'] == l, 'latitude'], \n 'ro'\n )\n ax.annotate(s=str(l),\n xy=(cl_df.loc[cl_df['labels'] == l, 'longitude'], \n cl_df.loc[cl_df['labels'] == l, 'latitude'])\n )\n # динамическая часть(стрелочки)\n # они будут изменяться со временем\n for first_label in lim_cl_df['labels']:\n for second_label in lim_cl_df['labels']:\n # расчитываем количество поездов в данный час\n # из первого кластера во второй и из второго в первый\n num_of_rides = len(df[(df['pickup_cluster'] == first_label)&\n (df['dropoff_cluster'] == second_label)&\n (df['pickup_hour'] == hour)])\n # стрелка проводиться как и вектор по двум точкам\n # первую задаем начальными координатами\n # в качестве второй передаем разность от уже заданной\n # до второй точки по обеим осям\n dist_x = cl_df.longitude[cl_df['labels'] == first_label].values[0] - \\\n cl_df.longitude[cl_df['labels'] == second_label].values[0]\n \n dist_y = cl_df.latitude[cl_df['labels'] == first_label].values[0] - \\\n cl_df.latitude[cl_df['labels'] == second_label].values[0]\n # нормировка количества поездок производится по всем поездкам\n pct = np.true_divide(num_of_rides, len(df))\n # непосредственное создание объекта Arrow\n # и его отрисовка\n arr = plt.Arrow(cl_df.longitude[cl_df['labels'] == 
first_label].values, \n cl_df.latitude[cl_df['labels'] == first_label].values,\n -dist_x,\n -dist_y,\n edgecolor='white',\n width=strong*pct\n )\n ax.add_patch(arr)\n arr.set_facecolor('g')\n ax.set_axis_off()\n \n ani = animation.FuncAnimation(fig,\n animate,\n sorted(df['pickup_hour'].unique()),\n interval=1000\n )\n # производим закрытие отрисованных графиков\n plt.close()\n # дириктори сохранения гифки\n gif_path = GIF_PATH\n # сохранение гифки\n ani.save(gif_path,\n writer='imagemagick',\n fps=1\n )\n # открываем сохраненную гифку и производим ее дешифрование\n # для дальнейшего URL и подстановки их в HTML\n video = io.open(gif_path,\n 'r+b'\n ).read()\n encoded = base64.b64encode(video)\n return encoded \n\nencoded = trafic_animation()\n# производим отрисовку анимации\nHTML(data='''<img src=\"data:image/gif;base64,{0}\"type=\"gif\"/>'''.format(\n encoded.decode('ascii')))", "_____no_output_____" ], [ "# присмотримся к центру города\nencoded = trafic_animation(lon_ani_lim=LON_ANI_CENTER, \n lat_ani_lim=LAT_ANI_CENTER, \n strong=2\n )\nHTML(data='''<img src=\"data:image/gif;base64,{0}\"type=\"gif\"/>'''.format(\n encoded.decode('ascii')))", "_____no_output_____" ] ], [ [ "<p style=\"text-indent:20px;\">Прелесть такого рода визуализации в том, что ее может проинтерпритировать даже ребенок.</p>", "_____no_output_____" ], [ "## <center> Заключение", "_____no_output_____" ], [ "<p style=\"text-indent:20px;\">Tensorflow довольно мощное API, которое хорошо подходит не только для обучения нейронных сетей. Хотя стоит отметит скудность документации(по сравнению с sklearn) по некоторым частям библиотеки. Одна из таких частей и была рассмотренна в данном тьюториале. Я так же надеюсь вам понравилась визуализации и вы влюбились в нее так же как и я когда впервые ее увидел. Если такого рода тьюториал вам понравится, то я подумаю о переносе его в виде статьи на хабрахабр и создании цикла такого рода статей.</p>", "_____no_output_____" ], [ "<p style=\"text-indent:20px;\">Спасибо за внимание!</p>", "_____no_output_____" ] ] ]
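For comparison, the same clustering can be reproduced with scikit-learn (a sketch, not part of the original tutorial; it reuses the centered data array and the constants defined above):

from sklearn.cluster import MiniBatchKMeans
mbk = MiniBatchKMeans(n_clusters=NUM_CLUSTERS, random_state=29).fit(data)
sk_centers = mbk.cluster_centers_ + np.array([LON_CONST, LAT_CONST])   # undo the centering
print(sk_centers)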
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
4a7bbdc54474fc3680b544f33cd4761fd51a9d11
35,914
ipynb
Jupyter Notebook
docs/source/notebooks/DiCE_model_agnostic_CFs.ipynb
prabhathur/CF
20943f3f326e72ea7c5464bc2c3eee06703ed404
[ "MIT" ]
null
null
null
docs/source/notebooks/DiCE_model_agnostic_CFs.ipynb
prabhathur/CF
20943f3f326e72ea7c5464bc2c3eee06703ed404
[ "MIT" ]
null
null
null
docs/source/notebooks/DiCE_model_agnostic_CFs.ipynb
prabhathur/CF
20943f3f326e72ea7c5464bc2c3eee06703ed404
[ "MIT" ]
null
null
null
29.437705
347
0.409172
[ [ [ "# Generating counterfactual explanations with any ML model", "_____no_output_____" ], [ "The goal of this notebook is to show how to generate CFs for ML models using frameworks other than TensorFlow or PyTorch. This is a work in progress and here we show a method to generate diverse CFs by independent random sampling of features. We use scikit-learn models for demonstration. ", "_____no_output_____" ] ], [ [ "# import DiCE\nimport dice_ml\nfrom dice_ml.utils import helpers # helper functions\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.metrics import classification_report, accuracy_score", "_____no_output_____" ] ], [ [ "## Loading dataset", "_____no_output_____" ], [ "We use the \"adult\" income dataset from UCI Machine Learning Repository (https://archive.ics.uci.edu/ml/datasets/adult). For demonstration purposes, we transform the data as described in dice_ml.utils.helpers module.", "_____no_output_____" ] ], [ [ "dataset = helpers.load_adult_income_dataset()", "_____no_output_____" ], [ "dataset.head()", "_____no_output_____" ], [ "d = dice_ml.Data(dataframe=dataset, continuous_features=['age', 'hours_per_week'], outcome_name='income')", "_____no_output_____" ] ], [ [ "## Training a custom ML model", "_____no_output_____" ], [ "Below, we build an Artificial Neural Network using *MLPClassifier* in scikit-learn. We try to use the same set of parameters as used in this advanced [notebook](DiCE_with_private_data.ipynb), however, there are other framework-dependent parameters that can't be easily ported, so the accuracy/performance of the two models will be different.", "_____no_output_____" ] ], [ [ "train, test = d.split_data(d.normalize_data(d.one_hot_encoded_data))\nX_train = train.loc[:, train.columns != 'income']\ny_train = train.loc[:, train.columns == 'income']\nX_test = test.loc[:, test.columns != 'income']\ny_test = test.loc[:, test.columns == 'income']", "_____no_output_____" ], [ "mlp = MLPClassifier(hidden_layer_sizes=(20), alpha=0.001, learning_rate_init=0.01, batch_size=32, random_state=17,\n max_iter=20, verbose=False, validation_fraction=0.2, ) #max_iter is epochs in TF\nmlp.fit(X_train, y_train.values.ravel())", "c:\\users\\t-rakom\\python-virtual-environments\\dice_model_change\\lib\\site-packages\\sklearn\\neural_network\\_multilayer_perceptron.py:585: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (20) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n" ], [ "# provide the trained ML model to DiCE's model object\nbackend = None\nm = dice_ml.Model(model=mlp, backend=backend)", "_____no_output_____" ] ], [ [ "## Generate diverse counterfactuals", "_____no_output_____" ] ], [ [ "# initiate DiCE\nexp = dice_ml.Dice(d, m)", "_____no_output_____" ], [ "# query instance in the form of a dictionary; keys: feature name, values: feature value\nquery_instance = {'age':22, \n 'workclass':'Private', \n 'education':'HS-grad', \n 'marital_status':'Single', \n 'occupation':'Service',\n 'race': 'White', \n 'gender':'Female', \n 'hours_per_week': 45}", "_____no_output_____" ], [ "# generate counterfactuals\ndice_exp = exp.generate_counterfactuals(query_instance, total_CFs=4, desired_class=\"opposite\")", "Diverse Counterfactuals found! 
", "_____no_output_____" ], [ "Further, different sets of counterfactuals can be generated with different random seeds.", "_____no_output_____" ] ], [ [ "# generate counterfactuals\ndice_exp = exp.generate_counterfactuals(query_instance, total_CFs=4, desired_class=\"opposite\", random_seed=9) # default random seed is 17", "Diverse Counterfactuals found! total time taken: 00 min 00 sec\n" ], [ "dice_exp.visualize_as_dataframe(show_only_changes=True)", "Query instance (original outcome : 1)\n" ] ], [ [ "### Selecting the features to vary", "_____no_output_____" ], [ "When some features are held fixed, random sampling is unable to generate valid CFs, while current DiCE still finds valid, diverse CFs.", "_____no_output_____" ] ], [ [ "# generate counterfactuals\ndice_exp = exp.generate_counterfactuals(query_instance, total_CFs=4, desired_class=\"opposite\",\n                                        features_to_vary=['workclass','education','occupation','hours_per_week'])", "Only 0 (required 4) Diverse Counterfactuals found for the given configuation, perhaps try with different values of proximity (or diversity) weights or learning rate... ; total time taken: 00 min 00 sec\n" ], [ "dice_exp.visualize_as_dataframe(show_only_changes=True)", "Query instance (original outcome : 1)\n" ] ], [ [ "### Choosing feature ranges", "_____no_output_____" ], [ "Since the features are sampled randomly, they can freely vary across their range. In the example below, we show how the range of continuous features can be controlled using the *permitted_range* parameter, which can now be passed during CF generation.", "_____no_output_____" ] ], [ [ "# generate counterfactuals\ndice_exp = exp.generate_counterfactuals(query_instance, total_CFs=4, desired_class=\"opposite\",\n                                        permitted_range={'age':[22,50],'hours_per_week':[40,60]})", "Diverse Counterfactuals found! total time taken: 00 min 00 sec\n" ], [ "dice_exp.visualize_as_dataframe(show_only_changes=True)", "Query instance (original outcome : 1)\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
4a7bca0d829625338b388cd45ea1a57d056a0cb8
32,551
ipynb
Jupyter Notebook
Python Basic Tutorials/2. NumPy Tutorial.ipynb
iamstarstuff/PhysicStuff
99b057ff028ef10b0b4228fee5db7f7c7f2630ee
[ "MIT" ]
3
2021-06-12T16:14:06.000Z
2021-08-04T05:22:07.000Z
Python Basic Tutorials/2. NumPy Tutorial.ipynb
iamstarstuff/PhysicStuff
99b057ff028ef10b0b4228fee5db7f7c7f2630ee
[ "MIT" ]
null
null
null
Python Basic Tutorials/2. NumPy Tutorial.ipynb
iamstarstuff/PhysicStuff
99b057ff028ef10b0b4228fee5db7f7c7f2630ee
[ "MIT" ]
null
null
null
22.699442
459
0.495899
[ [ [ "# NumPy\nNumPy is the fundamental package for scientific computing in Python. It is a Python library that provides a multidimensional array object, various derived objects (such as masked arrays and matrices), and an assortment of routines for fast operations on arrays, including mathematical, logical, shape manipulation, sorting, selecting, I/O, discrete Fourier transforms, basic linear algebra, basic statistical operations, random simulation and much more.", "_____no_output_____" ], [ "- NumPy is a python library, stands for Numerical Python\n- Used for working with arrays. It is very useful in Numerical calculations - matrices, linear algebra, etc\n- The array object in NumPy is called ndarray (n-dimensional array). Arrays are frequently used in data sciences, where speed and accuracy matters. It is similar to list but it is way faster than that.\n- Elements in NumPy array cannot be heterogeneous like in lists. The elements in a NumPy array are all required to be of the same data type, and thus will be the same size in memory.\n- NumPy arrays have a fixed size at creation, unlike Python lists (which can grow dynamically). Changing the size of an ndarray will create a new array and delete the original.\n- NumPy library was written partially in Python, but most of the parts that require fast computation are written in C or C++.\n- For detailed information you can go through the [official documentation](https://numpy.org/doc/stable/user/absolute_beginners.html#numpy-the-absolute-basics-for-beginners) \n- [Source code for NumPy](https://github.com/numpy/numpy)", "_____no_output_____" ] ], [ [ "# To import the library use\nimport numpy", "_____no_output_____" ], [ "# add keyword numpy before using\na = numpy.array([1,2,3,4,5]) # defines a as numpy object \n# array is enclosed in ([])", "_____no_output_____" ] ], [ [ "NumPy is imported under the alias using the keyword \"as\" - import numpy as np \nThis shortens the keyword required in syntax, instead of numpy.array we can type np.array", "_____no_output_____" ] ], [ [ "import numpy as np\na = np.array([1,2,3,4,5])\nb = [1,2,3,4,5]\nprint(a)\nprint(b)\nprint(type(a)) # shows the type\nprint(type(b))", "[1 2 3 4 5]\n[1, 2, 3, 4, 5]\n<class 'numpy.ndarray'>\n<class 'list'>\n" ] ], [ [ "Notice the output of print(a), it is enclosed in square brackets like lists but not separated by commas like lists. 
Hence the output is a numpy array.", "_____no_output_____" ] ], [ [ "# Use a tuple to create a numpy array\nimport numpy as np\na = np.array((1,2,3,4,5))\nprint(a)\nprint(type(a))\n# To create an ndarray, we can pass a list, tuple or any array-like object into the array() method.", "[1 2 3 4 5]\n<class 'numpy.ndarray'>\n" ] ], [ [ "## Dimensions in Array\nA dimension in an array is one level of array depth.\n\n- nested arrays: arrays that have arrays as elements.\n\n#### Check Number of Dimensions of array\nThe *ndim* attribute returns an integer that tells us how many dimensions an array has.\n\nIf a is defined as an array, to check the dimensions of a the syntax is a.ndim\n\n### 0-D Arrays\n- 0-D arrays, or scalars, are the elements in an array; each value in an array is a 0-D array.", "_____no_output_____" ] ], [ [ "import numpy as np\na = np.array(9) # single element\nprint(a)\nprint(a.ndim) # prints the dimension of the array", "9\n0\n" ] ], [ [ "### 1-D Arrays\nAn array that has 0-D arrays as its elements.", "_____no_output_____" ] ], [ [ "a = np.array([1,2,3,4,5])\nprint(a)\nprint(a.ndim)", "[1 2 3 4 5]\n1\n" ] ], [ [ "### 2-D Arrays\nAn array that has 1-D arrays as its elements is called a 2-D array.\n\nIt represents a matrix.\n\nNote: NumPy also has a submodule dedicated to matrix operations called numpy.mat (go through the [documentation](https://numpy.org/doc/stable/reference/generated/numpy.mat.html))", "_____no_output_____" ] ], [ [ "import numpy as np\na = np.array([[1,2,3],[4,5,6]])\nprint(a)\nprint(a.ndim)", "[[1 2 3]\n [4 5 6]]\n2\n" ] ], [ [ "### 3-D Arrays\nAn array of 2-D arrays is called a 3-D array.", "_____no_output_____" ] ], [ [ "import numpy as np\na = np.array([[[1,2,3],[4,5,6],[7,8,9]],[[9,8,7],[6,5,4],[3,2,1]]])\nprint(a)\nprint(a.ndim)", "[[[1 2 3]\n  [4 5 6]\n  [7 8 9]]\n\n [[9 8 7]\n  [6 5 4]\n  [3 2 1]]]\n3\n" ] ], [ [ "# Common example to demonstrate dimensions\nimport numpy as np\na = np.array(45)\nb = np.array([1,2,3,4,5])\nc = np.array([[1,2,3],[4,5,6]])\nd = np.array([[1,2,3],[4,5,6],[7,8,9]])\ne = np.array([[[1,2,3],[4,5,6]],[[1,2,3],[4,5,6]]])\n# Pay very close attention to the number of square brackets.\n# One neat trick: the number of opening square brackets at the start equals the array's dimension.\nprint(a,'\\n')\nprint(b,'\\n')\nprint(c,'\\n')\nprint(d,'\\n')\nprint(e,'\\n')\n\nprint(\"The dimension of\",'\\n',a,\"is --\",a.ndim)\nprint(\"The dimension of\",'\\n',b,\"is --\",b.ndim)\nprint(\"The dimension of\",'\\n',c,\"is --\",c.ndim)\nprint(\"The dimension of\",'\\n',d,\"is --\",d.ndim)\nprint(\"The dimension of\",'\\n',e,\"is --\",e.ndim)", "45 \n\n[1 2 3 4 5] \n\n[[1 2 3]\n [4 5 6]] \n\n[[1 2 3]\n [4 5 6]\n [7 8 9]] \n\n[[[1 2 3]\n  [4 5 6]]\n\n [[1 2 3]\n  [4 5 6]]] \n\nThe dimension of \n 45 is -- 0\nThe dimension of \n [1 2 3 4 5] is -- 1\nThe dimension of \n [[1 2 3]\n [4 5 6]] is -- 2\nThe dimension of \n [[1 2 3]\n [4 5 6]\n [7 8 9]] is -- 2\nThe dimension of \n [[[1 2 3]\n  [4 5 6]]\n\n [[1 2 3]\n  [4 5 6]]] is -- 3\n" ], [ "# To make an array of the desired number of dimensions\na = np.array([1,2,3,4],ndmin=7)\nprint(a)\nprint(\"Number of dimensions: \",a.ndim)", "[[[[[[[1 2 3 4]]]]]]]\nNumber of dimensions:  7\n" ] ], [ [ "## Access Array Elements\nArray indexing is the same as accessing an array element.\n\nYou can access an array element by referring to its index number.
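Negative indices count from the end of the array:\n\n```python\nimport numpy as np\na = np.array([1,2,3,4])\nprint(a[-1]) # 4, the last element\nprint(a[-2]) # 3, second from the end\n```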
", "_____no_output_____" ] ], [ [ "import numpy as np\na = np.array([1,2,3,4])\nprint(a[0]) # Remember! the first element has index 0 in python", "1\n" ], [ "'''To access elements from 2-D arrays we can use comma-separated integers representing \nthe dimension and the index of the element.'''\n\na = np.array([[1,2,3,4,5],[6,7,8,9,10]]) \n# [1,2,3,4,5] = 0th dimension, [6,7,8,9,10] = 1st dimension\n\nprint(a[0,1]) # first index = 0 -> selects 1st array, second index = 1 -> selects second element of first array", "2\n" ], [ "print(a[1,3]) # syntax - a[dimension,element]", "9\n" ], [ "a = np.array([[[1,2,3],[4,5,6]],[[7,8,9],[10,11,12]]])\n\nprint(a[0,1,1]) \n'''\nfirst index = 0 -> Selects [[1,2,3],[4,5,6]]\nsecond index = 1 -> Selects [4,5,6]\nthird index = 1 -> Selects 5\n'''\nprint(\"Dimensions of a: \",a.ndim)", "5\nDimensions of a:  3\n" ], [ "a.shape\n", "_____no_output_____" ] ], [ [ "`a` has 2 elements `[[1,2,3],[4,5,6]]` & `[[7,8,9],[10,11,12]]`, \nof which each has 2 elements (`[1,2,3]` & `[4,5,6]` of the 1st element; `[7,8,9]` & `[10,11,12]` of the 2nd element), \nof which each has 3 elements (`1,2,3`, and so on). \n`a.shape` returns (2, 2, 3), which is the shape of the array. ", "_____no_output_____" ], [ "## Slicing Arrays\nSyntax: [start_inclusive:end_exclusive]\n\nalso\n\n[start:end:step]\n\nLeaving the start or end index blank means start from the beginning or go to the end, respectively.", "_____no_output_____" ] ], [ [ "a = np.array([1,2,3,4,5,6,7,8,9])\nprint(a[1:5]) # From 1st index to 4th index", "[2 3 4 5]\n" ], [ "a[:5] # From beginning to 4th index", "_____no_output_____" ], [ "a[5:]", "_____no_output_____" ], [ "a[2:6:2] # from index 2 to 5 in steps of 2", "_____no_output_____" ], [ "b = np.array([1,2,3,4,5,6,7])\nc = np.array_split(b,3) # array_split(array, no. of splits)\nprint(c)", "[array([1, 2, 3]), array([4, 5]), array([6, 7])]\n" ] ], [ [ "## Random\nNumPy has a submodule `random`; its `rand` function creates an array of the given shape and populates it with random samples from a uniform distribution over `[0,1)` [Documentation](https://numpy.org/doc/stable/reference/random/generated/numpy.random.rand.html)", "_____no_output_____" ] ], [ [ "# import random from numpy so that we don't have to write np.random.rand()\nfrom numpy import random \nx = random.rand() # returns a random float in [0, 1)\nx", "_____no_output_____" ] ], [ [ "`random.randint(low, high=None, size=None, dtype=int)` \nReturns random integers from low (inclusive) to high (exclusive). \n\nReturns random integers from the “discrete uniform” distribution of the specified dtype in the “half-open” interval [low, high). If high is None (the default), then results are from [0, low).
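For example, with `high` left as None, the single argument becomes the (exclusive) upper bound:\n\n```python\nimport numpy as np\nnp.random.randint(5)          # one integer from 0..4\nnp.random.randint(2, size=10) # ten integers, each 0 or 1\n```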
\n[Documentation](https://numpy.org/doc/stable/reference/random/generated/numpy.random.randint.html#numpy-random-randint)", "_____no_output_____" ] ], [ [ "x = random.randint(100, size=(5)) # gives an array of 5 random integers from 0 to 99 (100 is exclusive)\nx", "_____no_output_____" ], [ "x = random.randint(100, size=(3,3)) # gives a 3 x 3 array\nx", "_____no_output_____" ], [ "x = random.choice([3,5,7,9]) # chooses a random value from the given array\nx", "_____no_output_____" ], [ "x = random.choice([3,5,7,9],size=(3,3)) # creates a 3 x 3 array by choosing values randomly from the given array\nx", "_____no_output_____" ], [ "x = random.randint(100, size=(5))\ny = np.random.shuffle(x) # note: shuffle modifies x in place and returns None\nprint(x)\nprint(y)", "[28 11 57 51 15]\nNone\n" ], [ "x = random.randint(1000,size=(10)) # 10 random values from 0 to 999\nprint(x) \nprint(np.var(x)) # Variance\nprint(np.std(x)) # Standard Deviation\nprint(np.average(x)) # Average", "[627  90 246 616 904 596  83 604 850 692]\n76921.56\n277.3473634271651\n530.8\n" ] ], [ [ "`np.random.randn()` returns a sample (or samples) from the \"standard normal\" distribution. \nIf positive int_like arguments are provided, `randn` generates an array of shape (d0,d1,...,dn), filled with random floats sampled from a univariate \"normal\" (Gaussian) distribution of mean 0 and variance 1. A single float randomly sampled from the distribution is returned if no argument is provided.", "_____no_output_____" ] ], [ [ "x = np.random.randn(10)\nx", "_____no_output_____" ] ], [ [ "shuffle modifies a sequence in-place by shuffling its contents.\n\nThis function only shuffles the array along the first axis of a multi-dimensional array. The order of sub-arrays is changed but their contents remain the same.", "_____no_output_____" ] ], [ [ "x = np.array([1,2,3,4,5,6,7,8,9,10])\nrandom.shuffle(x) \nx ", "_____no_output_____" ] ], [ [ "## Products", "_____no_output_____" ] ], [ [ "p1 = np.inner(2,2) # gives the inner product\n\nv_a = 9 + 6j\nv_b = 5 + 2j\np2 = np.inner(v_a,v_b) # inner product of 2 complex values; note np.inner does not conjugate (use np.vdot for that)\nprint(p1) \nprint(p2)", "4\n(33+48j)\n" ], [ "a1 = np.array([[2,6],[7,8]])\na2 = np.array([[5,10],[-2,3]])\np3 = np.inner(a1,a2)\nprint(p3)", "[[ 70  14]\n [115  10]]\n" ], [ "# Cross Product\np4 = np.cross(a1,a2)\nprint(p4)", "[-10  37]\n" ], [ "# Dot Product\np5 = np.dot(a1,a2)\np5", "_____no_output_____" ] ], [ [ "If we just want the indices where a certain condition is satisfied, we can use `np.where( )`.
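It returns the matching indices as a tuple of arrays. There is also a three-argument form, `np.where(condition, a, b)`, which picks elementwise from `a` where the condition holds and from `b` elsewhere:\n\n```python\nimport numpy as np\nx = np.arange(5)\nnp.where(x < 3, x, -1) # -> array([ 0,  1,  2, -1, -1])\n```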
In the index form shown below, it is used to filter out data.", "_____no_output_____" ] ], [ [ "x = np.array([0,1,2,3,4,5,6,7,8,9])\nindices = np.where(x<5)\nx[indices]", "_____no_output_____" ] ], [ [ "**Functions like np.arange, np.linspace are very useful:**\n\nnp.arange (read as 'a range') gives an array of numbers within a given range and step size\n\nnp.linspace gives an array of linearly spaced numbers", "_____no_output_____" ] ], [ [ "np.arange(0,10,3) # syntax - (inclusive_start, exclusive_stop, stepsize)\n# This will give an array of values from 0 up to (but not including) 10 in steps of 3", "_____no_output_____" ], [ "np.arange(-np.pi, np.pi, 1) ", "_____no_output_____" ], [ "np.linspace(-np.pi, np.pi, 7) # linearly spaced values - the difference between 2 consecutive values is not necessarily 1", "_____no_output_____" ] ], [ [ "**Notice** the difference between the `np.arange()` function and the `np.linspace()` function:\n\n`np.arange` gives values with the same difference but doesn't include the last value, whereas `np.linspace` first sets the start and end values and divides the range linearly.\n\nThis changes the output of both of these functions significantly. \n\nIn the syntax of `np.arange` the **last value denotes the difference between each element**. But in `np.linspace` the **last value denotes the number of elements desired in the given range**; the difference between each element is determined accordingly by the system.", "_____no_output_____" ] ], [ [ "np.linspace(0,np.pi,10) # syntax - (inclusive_start, INCLUSIVE_stop, Number of elements)", "_____no_output_____" ] ], [ [ "## NumPy Logarithms\nNumPy has functions to take the log at base 2, e and 10\n- `log2()` - log to the base 2\n- `log10()` - log to the base 10\n- `log()` - natural log / base $\\mathcal{e}$", "_____no_output_____" ] ], [ [ "# log to the base 2\nx = np.arange(1,10)\nprint(x)\nprint(np.log2(x))", "[1 2 3 4 5 6 7 8 9]\n[0.         1.         1.5849625  2.         2.32192809 2.5849625\n 2.80735492 3.         3.169925  ]\n" ], [ "# log to the base 10\nprint(np.log10(x))", "[0.         0.30103    0.47712125 0.60205999 0.69897    0.77815125\n 0.84509804 0.90308999 0.95424251]\n" ], [ "# log to the base e or natural log (ln)\nprint(np.log(x))", "[0.         0.69314718 1.09861229 1.38629436 1.60943791 1.79175947\n 1.94591015 2.07944154 2.19722458]\n" ] ], [ [ "## NumPy LCM and GCD\nNumPy has the functions `np.lcm` and `np.gcd`. \n\nWe can also use these functions to find the lcm and gcd across all elements of an array using the $reduce( )$ method", "_____no_output_____" ] ], [ [ "x = 4\ny = 12\nlcm = np.lcm(x,y)\nlcm", "_____no_output_____" ], [ "gcd = np.gcd(x,y)\ngcd", "_____no_output_____" ], [ "x = np.arange(2,10)\ny = np.lcm.reduce(x) # use reduce() when the input is an array\ny", "_____no_output_____" ], [ "x = np.array([4,44,40,20,22])\nnp.gcd.reduce(x)", "_____no_output_____" ], [ "x = np.random.randint(100,size=2)\nprint(x)\nprint(np.gcd.reduce(x))", "[81 33]\n3\n" ] ], [ [ "## Convert Degrees into Radians and Radians to Degrees\nBy default the values are in radians, but we can convert them to degrees and vice versa if required\n\n$$180^\\circ=\\pi\\;rad$$\n$$\\therefore 1\\;rad=\\Big(\\frac{180}{\\pi}\\Big)^\\circ$$
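A quick numeric check of this formula (`np.deg2rad` performs exactly this multiplication):\n\n```python\nimport numpy as np\nprint(180 * np.pi / 180) # 3.141592653589793\nprint(np.deg2rad(180))   # same value\n```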
360.]\n" ], [ "y = np.array([np.pi/2, np.pi, 4*np.pi/3, 2*np.pi])\ny_degree = np.rad2deg(y)\nprint(y_degree)", "[ 90. 180. 240. 360.]\n" ] ], [ [ "NumPy also has the function to find angles i.e inverse trig values\n\narcsin( ), arccos( ), arctan( )", "_____no_output_____" ] ], [ [ "x = np.arcsin(0.8)\nx_deg = np.rad2deg(x)\nprint(x)\nprint(round(x_deg,2)) # round(x_deg,2) rounds off the value of x_deg to 2 decimal places", "0.9272952180016123\n53.13\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
4a7be2e9aa71ef6dc959d7761fe09569c20e96cc
640,234
ipynb
Jupyter Notebook
analisis_resultados_achiote.ipynb
PabloViana12580/investigacion_achiote
c3e41c8b694f0701f516c779bff77b9b732cb0fe
[ "Apache-2.0" ]
null
null
null
analisis_resultados_achiote.ipynb
PabloViana12580/investigacion_achiote
c3e41c8b694f0701f516c779bff77b9b732cb0fe
[ "Apache-2.0" ]
null
null
null
analisis_resultados_achiote.ipynb
PabloViana12580/investigacion_achiote
c3e41c8b694f0701f516c779bff77b9b732cb0fe
[ "Apache-2.0" ]
null
null
null
123.573441
35,138
0.76674
[ [ [ "# Analisis de resultados encuesta conocimiento y actitudes ante el uso del achiote", "_____no_output_____" ], [ "#### Cargamos librerias a utilizar", "_____no_output_____" ] ], [ [ "library(\"dplyr\")\nlibrary(\"tidytext\")\nlibrary(\"tm\")\nlibrary(\"ggplot2\")\nlibrary(\"stringr\")\nlibrary(\"corrplot\")\nlibrary(\"cluster\")", "_____no_output_____" ] ], [ [ "#### Leemos los archivo Csv", "_____no_output_____" ] ], [ [ "achiote1 <- read.csv(\"first_chunk.csv\", header = FALSE)\nachiote2 <- read.csv(\"second_chunk.csv\", header = FALSE)", "_____no_output_____" ] ], [ [ "### Hacemos limpieza para preparar los datos", "_____no_output_____" ], [ "#### Borramos columnas innecesarias", "_____no_output_____" ] ], [ [ "a1_clean <- achiote1[c(-1, -2),-c(1,68)]\na2_clean <- achiote2[c(-1, -2),-c(1,68)]", "_____no_output_____" ] ], [ [ "#### Debido a las preguntas con múltiples respuestas, debemos combinar las columnas pertinentes", "_____no_output_____" ] ], [ [ "a1_clean$V18 <- paste(a1_clean$V18, a1_clean$V19, a1_clean$V20, a1_clean$V21, a1_clean$V22, a1_clean$V23, sep = \" \")\na1_clean$V24 <- paste(a1_clean$V24, a1_clean$V25, a1_clean$V26, a1_clean$V27, a1_clean$V28, a1_clean$V29, a1_clean$V30, a1_clean$V31, a1_clean$V32, a1_clean$V33, a1_clean$V34, sep = \" \")\na1_clean$V36 <- paste(a1_clean$V36, a1_clean$V37, a1_clean$V38, a1_clean$V39, a1_clean$V40, a1_clean$V41, a1_clean$V42, sep = \" \")\na1_clean$V47 <- paste(a1_clean$V47, a1_clean$V48, a1_clean$V49, a1_clean$V50, a1_clean$V51, a1_clean$V52, a1_clean$V53, a1_clean$V54, a1_clean$V55, sep = \" \")\na1_clean$V58 <- paste(a1_clean$V58, a1_clean$V59, a1_clean$V60, a1_clean$V61, a1_clean$V62, a1_clean$V63, a1_clean$V64, a1_clean$V65, sep = \" \")\n\na2_clean$V18 <- paste(a2_clean$V18, a2_clean$V19, a2_clean$V20, a2_clean$V21, a2_clean$V22, a2_clean$V23, sep = \" \")\na2_clean$V24 <- paste(a2_clean$V24, a2_clean$V25, a2_clean$V26, a2_clean$V27, a2_clean$V28, a2_clean$V29, a2_clean$V30, a2_clean$V31, a2_clean$V32, a2_clean$V33, a2_clean$V34, sep = \" \")\na2_clean$V36 <- paste(a2_clean$V36, a2_clean$V37, a2_clean$V38, a2_clean$V39, a2_clean$V40, a2_clean$V41, a2_clean$V42, sep = \" \")\na2_clean$V47 <- paste(a2_clean$V47, a2_clean$V48, a2_clean$V49, a2_clean$V50, a2_clean$V51, a2_clean$V52, a2_clean$V53, a2_clean$V54, a2_clean$V55, sep = \" \")\na2_clean$V58 <- paste(a2_clean$V58, a2_clean$V59, a2_clean$V60, a2_clean$V61, a2_clean$V62, a2_clean$V63, a2_clean$V64, a2_clean$V65, sep = \" \")", "_____no_output_____" ] ], [ [ "#### Eliminamos columnas repetidas", "_____no_output_____" ] ], [ [ "a1_clean <- subset(a1_clean, select=-c(V19,V20,V21,V22,V23,V25,V26,V27,V28,V29,V30,V31,V32,V33,V34,V37,V38,V39,V40,V41,V42,V48,V49,V50,V51,V52,V53,V54,V55,V59,V60,V61,V62,V63,V64,V65))\na2_clean <- subset(a2_clean, select=-c(V19,V20,V21,V22,V23,V25,V26,V27,V28,V29,V30,V31,V32,V33,V34,V37,V38,V39,V40,V41,V42,V48,V49,V50,V51,V52,V53,V54,V55,V59,V60,V61,V62,V63,V64,V65))\n", "_____no_output_____" ] ], [ [ "#### Cambiamos titulos de los data sets", "_____no_output_____" ] ], [ [ "colnames(a1_clean) <- c(\"Id\",\n \"Ip\",\n \"Acceso\",\n \"Fecha_inicio\",\n \"Fecha_finalizacion\",\n \"Procedencia\",\n \"Municipio\",\n \"Edad\",\n \"Sexo\",\n \"Q1\",\n \"Q2\",\n \"Q3\",\n \"Q4\",\n \"Justificacion_1\",\n \"He leido\",\n \"Q5\",\n \"Q6\",\n \"Q7\",\n \"Q8\",\n \"Q9\",\n \"Q10\",\n \"Q11\",\n \"Q12\",\n \"Q13\",\n \"Q14\",\n \"Q15\",\n \"Justificacion_2\",\n \"Q16\",\n \"Q17\",\n \"Justificacion_3\")\ncolnames(a2_clean) <- c(\"Id\",\n \"Ip\",\n \"Acceso\",\n 
\"Fecha_inicio\",\n \"Fecha_finalizacion\",\n \"Procedencia\",\n \"Municipio\",\n \"Edad\",\n \"Sexo\",\n \"Q1\",\n \"Q2\",\n \"Q3\",\n \"Q4\",\n \"Justificacion_1\",\n \"He leido\",\n \"Q5\",\n \"Q6\",\n \"Q7\",\n \"Q8\",\n \"Q9\",\n \"Q10\",\n \"Q11\",\n \"Q12\",\n \"Q13\",\n \"Q14\",\n \"Q15\",\n \"Justificacion_2\",\n \"Q16\",\n \"Q17\",\n \"Justificacion_3\")", "_____no_output_____" ] ], [ [ "#### Unimos nuestros datasets separados en uno solo", "_____no_output_____" ] ], [ [ "achiote <- rbind(a1_clean, a2_clean)", "_____no_output_____" ] ], [ [ "### Procedemos a eliminar caracteres bizarros de los textos de la encuesta", "_____no_output_____" ], [ "#### Seteamos el encoding a UTF-8 para aceptar tildes y demás caracteres", "_____no_output_____" ] ], [ [ "Encoding(achiote$Edad) <- \"UTF-8\"\nEncoding(achiote$Municipio) <- \"UTF-8\"\nEncoding(achiote$Q1) <- \"UTF-8\"\nEncoding(achiote$Q6) <- \"UTF-8\"\nEncoding(achiote$Q7) <- \"UTF-8\"\nEncoding(achiote$Q11) <- \"UTF-8\"\nEncoding(achiote$Q12) <- \"UTF-8\"\nEncoding(achiote$Q13) <- \"UTF-8\"\nEncoding(achiote$Q14) <- \"UTF-8\"\nEncoding(achiote$Q16) <- \"UTF-8\"\nEncoding(achiote$Justificacion_1) <- \"UTF-8\"\nEncoding(achiote$Justificacion_2) <- \"UTF-8\"\nEncoding(achiote$Justificacion_3) <- \"UTF-8\"", "_____no_output_____" ] ], [ [ "#### Eliminamos palabras que hacen ruido", "_____no_output_____" ] ], [ [ "achiote$Q1 <- gsub(\"Si, ¿cuál? ¿cómo?:\", \"\", achiote$Q1, fixed=TRUE)\nachiote$Q5 <- gsub(\"Si, especifique:\", \"\", achiote$Q5, fixed=TRUE)\nachiote$Q7 <- gsub(\"Otro::\", \"\", achiote$Q7, fixed=TRUE)\nachiote$Q12 <- gsub(\"Conozco otra/otras, ¿cuál/cuáles?:\", \"\", achiote$Q12, fixed=TRUE)\nachiote$Q13 <- gsub(\"Si, ¿cuál? ¿cómo?:\", \"\", achiote$Q13, fixed=TRUE)", "_____no_output_____" ] ], [ [ "### Revisamos el producto final de la limpieza", "_____no_output_____" ] ], [ [ "head(achiote)\ntail(achiote)", "_____no_output_____" ] ], [ [ "#### Manejamos fechas y tiempos", "_____no_output_____" ] ], [ [ "#fechas\nfecha_inicio <- as.Date(substr(achiote$Fecha_inicio, 0, 8), \"%m/%d/%y\")\nfecha_final <- as.Date(substr(achiote$Fecha_finalizacion, 0, 8), \"%m/%d/%y\")\n#horas minutos\nhora_inicio <- as.POSIXct(substr(achiote$Fecha_inicio, 11, 15), format = \"%H:%M\")\nhora_final <- as.POSIXct(substr(achiote$Fecha_finalizacion, 11, 15), format = \"%H:%M\")\n#creando columnas\nachiote$Fecha_inicio <- fecha_inicio\nachiote$Fecha_finalizacion <- fecha_final\nachiote$hora_inicio <- hora_inicio\nachiote$hora_final <- hora_final\ndif <- hora_final - hora_inicio\nachiote$tiempo_terminar <- dif\n", "_____no_output_____" ] ], [ [ "#### Convertimos a factor los datos necesarios", "_____no_output_____" ] ], [ [ "achiote[sapply(achiote, is.character)] <- lapply(achiote[sapply(achiote, is.character)], \n as.factor)\nsummary(achiote)", "_____no_output_____" ], [ "str(achiote)", "'data.frame':\t101 obs. 
of 33 variables:\n $ Id : Factor w/ 100 levels \"1\",\"10\",\"100\",..: 1 13 24 35 46 57 68 79 90 2 ...\n $ Ip : Factor w/ 93 levels \"143.208.57.62\",..: 67 45 68 11 88 47 89 55 25 60 ...\n $ Acceso : Factor w/ 1 level \"Public Access\": 1 1 1 1 1 1 1 1 1 1 ...\n $ Fecha_inicio : Date, format: \"2020-09-18\" \"2020-09-18\" ...\n $ Fecha_finalizacion: Date, format: \"2020-09-18\" \"2020-09-18\" ...\n $ Procedencia : Factor w/ 2 levels \"Casco rural\",..: 2 2 2 2 2 2 2 2 1 2 ...\n $ Municipio : Factor w/ 29 levels \"Amatitlan\",\"Asunción Mita\",..: 13 28 13 13 13 5 13 13 13 13 ...\n $ Edad : Factor w/ 3 levels \"15-35 años\",\"36-55 años\",..: 1 1 1 1 1 1 1 1 1 1 ...\n $ Sexo : Factor w/ 2 levels \"Femenino\",\"Masculino\": 2 2 2 1 2 2 2 2 2 1 ...\n $ Q1 : Factor w/ 61 levels \"\",\"\\\"Mata ratón\\\", se cocina y con esa agua se da un baño para bajar la fiebre\",..: 28 38 38 38 38 56 38 38 9 51 ...\n $ Q2 : Factor w/ 2 levels \"No\",\"Si\": 2 2 1 1 1 1 2 1 2 2 ...\n $ Q3 : Factor w/ 3 levels \"\",\"No\",\"Si\": 1 1 3 3 3 3 1 3 1 1 ...\n $ Q4 : Factor w/ 6 levels \"\",\"1\",\"2\",\"3\",..: 1 1 3 4 4 5 1 4 1 1 ...\n $ Justificacion_1 : Factor w/ 29 levels \"\",\"Conozco que tiene propiedades positivas pero no sé cuán efectivas sean en las personas\",..: 1 1 21 13 15 9 1 2 1 1 ...\n $ He leido : Factor w/ 2 levels \"\",\"si\": 1 1 2 2 2 2 1 2 1 1 ...\n $ Q5 : Factor w/ 4 levels \"\",\"Bija\",\"No\",..: 3 3 1 1 1 1 3 1 3 3 ...\n $ Q6 : Factor w/ 12 levels \" \",\" Medicinal \",..: 9 9 1 1 1 1 9 1 10 9 ...\n $ Q7 : Factor w/ 23 levels \" \",\" No conozco ninguna aplicación del achiote \",..: 19 8 1 1 1 1 8 1 8 8 ...\n $ Q8 : Factor w/ 5 levels \"\",\"Mensualmente\",..: 5 5 1 1 1 1 5 1 2 2 ...\n $ Q9 : Factor w/ 12 levels \" \",\" No obtengo achiote \",..: 9 9 1 1 1 1 10 1 9 5 ...\n $ Q10 : Factor w/ 3 levels \"\",\"No\",\"Si\": 3 2 1 1 1 1 2 1 3 3 ...\n $ Q11 : Factor w/ 8 levels \"\",\"Flores\",\"Fruto\",..: 6 7 1 1 1 1 3 1 7 7 ...\n $ Q12 : Factor w/ 7 levels \"\",\"Azafrán\",\"Cúrcuma\",..: 7 7 1 1 1 1 7 1 2 3 ...\n $ Q13 : Factor w/ 31 levels \"\",\"Achiote\",\"Achiote, remolacha, tomate\",..: 19 6 1 1 1 1 29 1 8 14 ...\n $ Q14 : Factor w/ 23 levels \" \",\" No conozco ninguna preparación \",..: 13 3 1 1 1 1 2 1 2 2 ...\n $ Q15 : Factor w/ 6 levels \"\",\"1\",\"2\",\"3\",..: 5 6 1 1 1 1 4 1 6 6 ...\n $ Justificacion_2 : Factor w/ 57 levels \"\",\"al saber sus propiedades puedo utilizarlo.\",..: 41 33 1 1 1 1 54 1 34 14 ...\n $ Q16 : Factor w/ 24 levels \" \",\" Otra::no lo se\",..: 18 4 1 1 1 1 15 1 8 14 ...\n $ Q17 : Factor w/ 6 levels \"\",\"1\",\"2\",\"3\",..: 6 2 1 1 1 1 4 1 4 6 ...\n $ Justificacion_3 : Factor w/ 57 levels \"\",\"5. Tiene q tener algun efecto. 
Pero no creo q curativo\",..: 55 36 1 1 1 1 27 1 20 52 ...\n $ hora_inicio : POSIXct, format: \"2020-11-08 11:01:00\" \"2020-11-08 11:22:00\" ...\n $ hora_final : POSIXct, format: \"2020-11-08 11:05:00\" \"2020-11-08 11:25:00\" ...\n $ tiempo_terminar : 'difftime' num 4 3 1 1 ...\n ..- attr(*, \"units\")= chr \"mins\"\n" ] ], [ [ "## Resultados", "_____no_output_____" ], [ "#### colores para graficas", "_____no_output_____" ] ], [ [ "color.function <- colorRampPalette(c(\"#FFFFFF\" , \"#45094f\" ))\ncolor.ramp <- color.function(n = 10)", "_____no_output_____" ] ], [ [ "#### Procedencia", "_____no_output_____" ] ], [ [ "info <- table(achiote$Procedencia)\nxx <- barplot(info,main=\"Procedencia\", col=color.ramp)\ninfo", "_____no_output_____" ] ], [ [ "#### Municipio", "_____no_output_____" ] ], [ [ "achiote$Municipio[achiote$Municipio == 'Ciudad'] <- 'Guatemala'\nachiote$Municipio[achiote$Municipio == 'Ciudad de Guatemala'] <- 'Guatemala'\nachiote$Municipio[achiote$Municipio == 'GUATEMALA'] <- 'Guatemala'\nachiote$Municipio[achiote$Municipio == 'guatemala'] <- 'Guatemala'\nachiote$Municipio[achiote$Municipio == 'Ciudad Guatemala'] <- 'Guatemala'\nachiote$Municipio[achiote$Municipio == 'En Asunción Mita Jutiapa'] <- 'Asunción Mita'\nachiote$Municipio[achiote$Municipio == 'Guatemala, Fraijanes'] <- 'Fraijanes'\nachiote$Municipio[achiote$Municipio == 'Sta Catarina Pinula'] <- 'Santa Catarina Pinula'\nachiote$Municipio[achiote$Municipio == 'Sta. Catarina Pinula'] <- 'Santa Catarina Pinula'\nachiote$Municipio[achiote$Municipio == 'Villa Canales'] <- 'Villa canales'\nachiote$Municipio[achiote$Municipio == 'Sacatepéquez'] <- 'San Lucas Sacatepéquez'\nachiote$Municipio <- droplevels(achiote$Municipio)\ninfo <- table(achiote$Municipio)\nxx <- barplot(info,main=\"Municipio\", col=color.ramp, cex.lab=2)\ninfo", "_____no_output_____" ] ], [ [ "#### Edad", "_____no_output_____" ] ], [ [ "info <- table(achiote$Edad)\nxx <- barplot(info,main=\"Edad\", col=color.ramp, cex.lab=2)\ninfo", "_____no_output_____" ] ], [ [ "#### Sexo", "_____no_output_____" ] ], [ [ "info <- table(achiote$Sexo)\nxx <- barplot(info,main=\"Sexo\", col=color.ramp, cex.lab=2)\ninfo", "_____no_output_____" ] ], [ [ "#### ¿Ha utilizado usted alguna planta para tratar alguna enfermedad/afeccion?", "_____no_output_____" ] ], [ [ "text <- toupper(as.character(levels(achiote$Q1)))\ntext <- chartr(\"ÁÉÍÓÚ\", \"AEIOU\", text)\ntext_df <- tibble(line = 1:61, text = text)\n\ncustom_stop_words <- bind_rows(stop_words,\n tibble(word = c(tm::stopwords(\"spanish\"), \"dolor\", \"estomago\", \"infusion\", \"colicos\"),\n lexicon = \"custom\"))\n\ntext_df <- text_df %>% \n unnest_tokens(word, text) %>%\n anti_join(custom_stop_words) %>%\n count(word, sort = TRUE)\n\n\ntext_df %>%\n filter(n > 3) %>%\n mutate(word = reorder(word, n)) %>%\n ggplot(aes(word, n)) +\n geom_col() +\n geom_bar(stat=\"identity\", fill = \"#FF6666\") +\n labs(x = \"Plantas utilizadas\", y = \"Repeticiones\") +\n coord_flip()", "Joining, by = \"word\"\n\n" ] ], [ [ "#### ¿Conoce usted el achiote?", "_____no_output_____" ] ], [ [ "info <- table(achiote$Q2)\nxx <- barplot(info,main=\"¿Conoce usted el achiote?\", col=color.ramp, cex.lab=2)\ninfo", "_____no_output_____" ] ], [ [ "#### ¿Estaría dispuesto a conocer más sobre los usos ancestrales del achiote?", "_____no_output_____" ] ], [ [ "temp <- as.character(achiote$Q3)\nsi <- grep(\"Si\", temp, value = TRUE)\nno <- grep(\"No\", temp, value = TRUE) \ninfo <- table(c(si, no))\nxx <- barplot(info,main=\"¿Estaría dispuesto a 
conocer más sobre los usos del achiote?\", col=color.ramp, cex.lab=2)\ninfo", "_____no_output_____" ] ], [ [ "#### How effective do you think alternative achiote treatments are?", "_____no_output_____" ] ], [ [ "temp <- as.character(achiote$Q4)\nuno <- grep(1, temp, value = TRUE)\ndos <- grep(2, temp, value = TRUE) \ntres <- grep(3, temp, value = TRUE) \ncuatro <- grep(4, temp, value = TRUE) \ncinco <- grep(5, temp, value = TRUE) \ninfo <- table(c(uno, dos, tres, cuatro, cinco))\nprint(\"1 - No produce ningun efecto\")\nprint(\"2 - El efecto no es notable\")\nprint(\"3 - Levemente efectivo\")\nprint(\"4 - Efecto notable\")\nprint(\"5 - Efecto significativo\")\nxx <- barplot(info,main=\"¿Qué tan efectivo cree son los tratamientos alternativos de achiote?\", col=color.ramp, cex.lab=2)\ninfo", "[1] \"1 - No produce ningun efecto\"\n[1] \"2 - El efecto no es notable\"\n[1] \"3 - Levemente efectivo\"\n[1] \"4 - Efecto notable\"\n[1] \"5 - Efecto significativo\"\n" ] ], [ [ "#### Justification for the level of belief", "_____no_output_____" ] ], [ [ "text <- toupper(as.character(levels(achiote$Justificacion_1)))\ntext <- chartr(\"ÁÉÍÓÚ\", \"AEIOU\", text)\ntext_df <- tibble(text = text)\n\ncustom_stop_words <- bind_rows(stop_words,\n                               tibble(word = c(tm::stopwords(\"spanish\"), \"dolor\", \"estomago\", \"infusion\", \"colicos\"),\n                                      lexicon = \"custom\"))\n\ntext_df %>% \n    unnest_tokens(ngram, text, token = \"ngrams\", n = 2) %>%\n    count(ngram, sort = TRUE)", "_____no_output_____" ] ], [ [ "#### Do you know another name for achiote?", "_____no_output_____" ] ], [ [ "info <- table(achiote$Q5)\nxx <- barplot(info,main=\"¿Conoce usted otro nombre para el achiote?\", col=color.ramp, cex.lab=2)\ninfo", "_____no_output_____" ] ], [ [ "#### What uses of achiote have you heard of? What uses have you given it?
", "_____no_output_____" ] ], [ [ "text <- toupper(as.character(achiote$Q6))\ntext <- chartr(\"ÁÉÍÓÚ\", \"AEIOU\", text)\ntext_df <- tibble(text = text)\n\ntext_df <- text_df %>% \n    unnest_tokens(word, text) %>%\n    count(word, sort = TRUE)\n\ntext_df %>%\n  filter(n > 1) %>%\n  mutate(word = reorder(word, n)) %>%\n  ggplot(aes(word, n)) +\n  geom_col() +\n  geom_bar(stat=\"identity\", fill = \"#AFC8F5\") +\n  labs(x = \"Usos\", y = \"Ocurrencias\") +\n  coord_flip()\n\ntext_df", "_____no_output_____" ] ], [ [ "#### Known applications of achiote", "_____no_output_____" ] ], [ [ "text <- toupper(as.character(achiote$Q7))\ntext <- chartr(\"ÁÉÍÓÚ\", \"AEIOU\", text)\ntext_df <- tibble(text = text)\n\ncustom_stop_words <- bind_rows(stop_words,\n                               tibble(word = c(tm::stopwords(\"spanish\"), \"quita\", \"aumenta\", \"materna\", \"flujo\", \"achiote\", \"aplicacion\", \"conozco\", \"consumo\", \"control\", \"solar\", \"mosquitos\"),\n                                      lexicon = \"custom\"))\n\ntext_df <- text_df %>% \n    unnest_tokens(word, text) %>%\n    anti_join(custom_stop_words) %>%\n    count(word, sort = TRUE)\n\ntext_df %>%\n  filter(n > 1) %>%\n  mutate(word = reorder(word, n)) %>%\n  ggplot(aes(word, n)) +\n  geom_col() +\n  geom_bar(stat=\"identity\", fill = \"#9299DE\") +\n  labs(x = \"Aplicaciones\", y = \"Ocurrencias\") +\n  coord_flip()\n\ntext_df", "Joining, by = \"word\"\n\n" ] ], [ [ "#### How often do you use achiote in any of its forms?", "_____no_output_____" ] ], [ [ "info <- table(achiote$Q8)\nxx <- barplot(info,main=\"¿Con qué frecuencia utiliza el achiote en cualquiera de sus presentaciones?\", col=color.ramp, cex.lab=2)\ninfo", "_____no_output_____" ] ], [ [ "#### How do you obtain achiote?", "_____no_output_____" ] ], [ [ "text <- toupper(as.character(achiote$Q9))\ntext <- chartr(\"ÁÉÍÓÚ\", \"AEIOU\", text)\ntext_df <- tibble(text = text)\n\ncustom_stop_words <- bind_rows(stop_words,\n                               tibble(word = c(tm::stopwords(\"spanish\"), \"conveniencia\"),\n                                      lexicon = \"custom\"))\n\ntext_df <- text_df %>% \n    unnest_tokens(word, text) %>%\n    anti_join(custom_stop_words) %>%\n    count(word, sort = TRUE)\n\ntext_df %>%\n  filter(n > 11) %>%\n  mutate(word = reorder(word, n)) %>%\n  ggplot(aes(word, n)) +\n  geom_col() +\n  geom_bar(stat=\"identity\", fill = \"#9299DE\") +\n  labs(x = \"Lugar obtencion\", y = \"Ocurrencias\") +\n  coord_flip()\n\ntext_df", "Joining, by = \"word\"\n\n" ] ], [ [ "#### Are you aware that achiote can be found in convenience stores under the spice section?", "_____no_output_____" ] ], [ [ "info <- table(achiote$Q10)\nxx <- barplot(info,main=\"¿conseguir achiote en los centro de conveniencia en especias?\", col=color.ramp, cex.lab=2)\ninfo", "_____no_output_____" ] ], [ [ "#### Select which part of the Bixa orellana L. (achiote) plant is used to obtain the commonly commercialized red powder
", "_____no_output_____" ] ], [ [ "info <- table(achiote$Q11)\nxx <- barplot(info,\n              main=\"qué parte se utiliza para obtener el polvo rojo comúnmente comercializado\",\n              col=color.ramp,\n              cex.lab=2)\ninfo", "_____no_output_____" ] ], [ [ "#### Do you know any other food-dyeing plant?", "_____no_output_____" ] ], [ [ "text <- toupper(as.character(achiote$Q12))\ntext <- chartr(\"ÁÉÍÓÚ\", \"AEIOU\", text)\ntext_df <- tibble(text = text)\n\ncustom_stop_words <- bind_rows(stop_words,\n                               tibble(word = c(tm::stopwords(\"spanish\"), \"otra\", \"no\",\"planta\",\"tintorea\", \"de\", \"cascara\", \"conozco\"),\n                                      lexicon = \"custom\"))\n\ntext_df <- text_df %>% \n    unnest_tokens(word, text) %>%\n    anti_join(custom_stop_words) %>%\n    count(word, sort = TRUE)\n\ntext_df %>%\n  mutate(word = reorder(word, n)) %>%\n  ggplot(aes(word, n)) +\n  geom_col() +\n  geom_bar(stat=\"identity\", fill = \"#D9C089\") +\n  labs(x = \"Planta\", y = \"Veces mencionada\") +\n  coord_flip()\n\ntext_df", "Joining, by = \"word\"\n\n" ] ], [ [ "#### Have you used any food-dyeing plant?", "_____no_output_____" ] ], [ [ "text <- toupper(as.character(achiote$Q13))\ntext <- chartr(\"ÁÉÍÓÚ\", \"AEIOU\", text)\ntext_df <- tibble(text = text)\n\ncustom_stop_words <- bind_rows(stop_words,\n                               tibble(word = c(tm::stopwords(\"spanish\"), \"curtido\", \"hirviendo\",\"40\", \"agua\",\"anteriores\",\"azul\",\"bebidas\",\"cocinar\",\"color\",\"colorear\",\"dar\",\"ensaladas\",\"enchiladas\",\"material\",\"menos\",\"min\",\"morada\",\"morado\",\"obtiene\",\"puede\",\"textiles\",\"usarse\",\"vertiendo\",\"viena\"),\n                                      lexicon = \"custom\"))\n\ntext_df <- text_df %>% \n    unnest_tokens(word, text) %>%\n    anti_join(custom_stop_words) %>%\n    count(word, sort = TRUE)\n\ntext_df %>%\n  mutate(word = reorder(word, n)) %>%\n  ggplot(aes(word, n)) +\n  geom_col() +\n  geom_bar(stat=\"identity\", fill = \"#8EF0D0\") +\n  labs(x = \"Planta\", y = \"Veces mencionada\") +\n  coord_flip()\n\ntext_df", "Joining, by = \"word\"\n\n" ] ], [ [ "#### What preparation methods do you know for using achiote as medicine?", "_____no_output_____" ] ], [ [ "text <- toupper(as.character(achiote$Q14))\ntext <- chartr(\"ÁÉÍÓÚ\", \"AEIOU\", text)\ntext_df <- tibble(text = text)\n\ncustom_stop_words <- bind_rows(stop_words,\n                               tibble(word = c(tm::stopwords(\"spanish\"), \"conozco\",\"preparacion\"),\n                                      lexicon = \"custom\"))\n\ntext_df <- text_df %>% \n    unnest_tokens(word, text) %>%\n    anti_join(custom_stop_words) %>%\n    count(word, sort = TRUE)\n\ntext_df %>%\n  mutate(word = reorder(word, n)) %>%\n  ggplot(aes(word, n)) +\n  geom_col() +\n  geom_bar(stat=\"identity\", fill = \"#F0B081\") +\n  labs(x = \"Preparacion\", y = \"Veces mencionada\") +\n  coord_flip()\n\ntext_df", "Joining, by = \"word\"\n\n" ] ], [ [ "#### How willing are you to use achiote as a substitute for Western medicine?", "_____no_output_____" ] ], [ [ "temp <- as.character(achiote$Q15)\nuno <- grep(1, temp, value = TRUE)\ndos <- grep(2, temp, value = TRUE) \ntres <- grep(3, temp, value = TRUE) \ncuatro <- grep(4, temp, value = TRUE) \ncinco <- grep(5, temp, value = TRUE) \ninfo <- table(c(uno, dos, tres, cuatro, cinco))\nprint(\"1 - No lo utilizaria\")\nprint(\"2 - Lo utilizaría como último recurso\")\nprint(\"3 - Lo utilizaría\")\nprint(\"4 - Totalmente de acuerdo en utilizarlo\")\nprint(\"5 - Promuevo el uso del achiote como alternativa\")\nxx <- barplot(info,main=\"¿utilizar el achiote como sustituto de 
la medicina occidental?\", col=color.ramp, cex.lab=2)\ninfo", "[1] \"1 - No lo utilizaria\"\n[1] \"2 - Lo utilizaría como último recurso\"\n[1] \"3 - Lo utilizaría\"\n[1] \"4 - Totalmente de acuerdo en utilizarlo\"\n[1] \"5 - Promuevo el uso del achiote como alternativa\"\n" ] ], [ [ "#### Justification: how willing are you to use achiote as a substitute for Western medicine?", "_____no_output_____" ] ], [ [ "text <- toupper(as.character(levels(achiote$Justificacion_2)))\ntext <- chartr(\"ÁÉÍÓÚ\", \"AEIOU\", text)\ntext_df <- tibble(text = text)\n\ntext_df %>% \n    unnest_tokens(ngram, text, token = \"ngrams\", n = 3) %>%\n    count(ngram, sort = TRUE)", "_____no_output_____" ] ], [ [ "#### Ways in which respondents would agree to take medicine made with achiote", "_____no_output_____" ] ], [ [ "text <- toupper(as.character(achiote$Q16))\ntext <- chartr(\"ÁÉÍÓÚ\", \"AEIOU\", text)\ntext_df <- tibble(text = text)\n\ncustom_stop_words <- bind_rows(stop_words,\n                               tibble(word = c(tm::stopwords(\"spanish\"), \"aplicacion\",\"anteriores\",\"maneras\",\"acuerdo\"),\n                                      lexicon = \"custom\"))\n\ntext_df <- text_df %>% \n    unnest_tokens(word, text) %>%\n    anti_join(custom_stop_words) %>%\n    count(word, sort = TRUE)\n\ntext_df %>%\n  mutate(word = reorder(word, n)) %>%\n  ggplot(aes(word, n)) +\n  geom_col() +\n  geom_bar(stat=\"identity\", fill = \"#BCF048\") +\n  labs(x = \"Planta\", y = \"Veces mencionada\") +\n  coord_flip()\n\ntext_df", "Joining, by = \"word\"\n\n" ] ], [ [ "#### How effective are achiote treatments?", "_____no_output_____" ] ], [ [ "temp <- as.character(achiote$Q17)\nuno <- grep(1, temp, value = TRUE)\ndos <- grep(2, temp, value = TRUE) \ntres <- grep(3, temp, value = TRUE) \ncuatro <- grep(4, temp, value = TRUE) \ncinco <- grep(5, temp, value = TRUE) \ninfo <- table(c(uno, dos, tres, cuatro, cinco))\nprint(\"1 - No produce ningun efecto\")\nprint(\"2 - El efecto no es notable\")\nprint(\"3 - Levemente efectivo\")\nprint(\"4 - Efecto notable\")\nprint(\"5 - Efecto significativo\")\nxx <- barplot(info,main=\"¿Qué tan efectivo cree son los tratamientos alternativos de achiote?\", col=color.ramp)\ninfo", "[1] \"1 - No produce ningun efecto\"\n[1] \"2 - El efecto no es notable\"\n[1] \"3 - Levemente efectivo\"\n[1] \"4 - Efecto notable\"\n[1] \"5 - Efecto significativo\"\n" ] ], [ [ "#### Justification for the achiote effectiveness question", "_____no_output_____" ] ], [ [ "text <- toupper(as.character(levels(achiote$Justificacion_3)))\ntext <- chartr(\"ÁÉÍÓÚ\", \"AEIOU\", text)\ntext_df <- tibble(text = text)\n\ncustom_stop_words <- bind_rows(stop_words,\n                               tibble(word = c(tm::stopwords(\"spanish\")),\n                                      lexicon = \"custom\"))\n\ntext_df %>% \n    unnest_tokens(ngram, text, token = \"ngrams\", n = 3) %>%\n    count(ngram, sort = TRUE)", "_____no_output_____" ] ], [ [ "#### Survey completion times", "_____no_output_____" ] ], [ [ "# fastest survey\nmin(achiote$tiempo_terminar)\n\n# average completion time\nmean(achiote$tiempo_terminar)\n\n# slowest survey\nmax(achiote$tiempo_terminar)", "_____no_output_____" ] ], [ [ "### Split: respondents familiar with achiote/enthusiasts vs. those unfamiliar", "_____no_output_____" ] ], [ [ "A <- split(achiote, achiote$Q2)\nA_no <- A$No\nB <- split(A_no, A_no$Q3, drop = TRUE)\n\nconocedores <- rbind(A$Si, B$Si)\nno_conocedores <- B$No\nstr(conocedores)\nstr(no_conocedores)", "'data.frame':\t99 obs. 
of 33 variables:\n $ Id : Factor w/ 100 levels \"1\",\"10\",\"100\",..: 1 13 68 90 2 4 5 8 10 11 ...\n $ Ip : Factor w/ 93 levels \"143.208.57.62\",..: 67 45 89 25 60 39 46 44 57 12 ...\n $ Acceso : Factor w/ 1 level \"Public Access\": 1 1 1 1 1 1 1 1 1 1 ...\n $ Fecha_inicio : Date, format: \"2020-09-18\" \"2020-09-18\" ...\n $ Fecha_finalizacion: Date, format: \"2020-09-18\" \"2020-09-18\" ...\n $ Procedencia : Factor w/ 2 levels \"Casco rural\",..: 2 2 2 1 2 2 2 1 2 2 ...\n $ Municipio : Factor w/ 18 levels \"Amatitlan\",\"Asunción Mita\",..: 8 17 8 8 8 12 8 13 14 17 ...\n $ Edad : Factor w/ 3 levels \"15-35 años\",\"36-55 años\",..: 1 1 1 1 1 1 2 1 1 1 ...\n $ Sexo : Factor w/ 2 levels \"Femenino\",\"Masculino\": 2 2 2 2 1 2 1 2 1 1 ...\n $ Q1 : Factor w/ 61 levels \"\",\"\\\"Mata ratón\\\", se cocina y con esa agua se da un baño para bajar la fiebre\",..: 28 38 38 9 51 57 58 18 31 38 ...\n $ Q2 : Factor w/ 2 levels \"No\",\"Si\": 2 2 2 2 2 2 2 2 2 2 ...\n $ Q3 : Factor w/ 3 levels \"\",\"No\",\"Si\": 1 1 1 1 1 1 1 1 1 1 ...\n $ Q4 : Factor w/ 6 levels \"\",\"1\",\"2\",\"3\",..: 1 1 1 1 1 1 1 1 1 1 ...\n $ Justificacion_1 : Factor w/ 29 levels \"\",\"Conozco que tiene propiedades positivas pero no sé cuán efectivas sean en las personas\",..: 1 1 1 1 1 1 1 1 1 1 ...\n $ He leido : Factor w/ 2 levels \"\",\"si\": 1 1 1 1 1 1 1 1 1 1 ...\n $ Q5 : Factor w/ 4 levels \"\",\"Bija\",\"No\",..: 3 3 3 3 3 3 3 3 3 3 ...\n $ Q6 : Factor w/ 12 levels \" \",\" Medicinal \",..: 9 9 9 10 9 7 3 11 10 9 ...\n $ Q7 : Factor w/ 23 levels \" \",\" No conozco ninguna aplicación del achiote \",..: 19 8 8 8 8 7 3 10 13 13 ...\n $ Q8 : Factor w/ 5 levels \"\",\"Mensualmente\",..: 5 5 5 2 2 3 4 3 3 3 ...\n $ Q9 : Factor w/ 12 levels \" \",\" No obtengo achiote \",..: 9 9 10 9 5 9 11 7 9 2 ...\n $ Q10 : Factor w/ 3 levels \"\",\"No\",\"Si\": 3 2 2 3 3 2 3 3 3 3 ...\n $ Q11 : Factor w/ 8 levels \"\",\"Flores\",\"Fruto\",..: 6 7 3 7 7 6 7 6 6 6 ...\n $ Q12 : Factor w/ 7 levels \"\",\"Azafrán\",\"Cúrcuma\",..: 7 7 7 2 3 7 6 2 7 2 ...\n $ Q13 : Factor w/ 31 levels \"\",\"Achiote\",\"Achiote, remolacha, tomate\",..: 19 6 29 8 14 19 2 19 19 19 ...\n $ Q14 : Factor w/ 23 levels \" \",\" No conozco ninguna preparación \",..: 13 3 2 2 2 22 2 6 11 2 ...\n $ Q15 : Factor w/ 6 levels \"\",\"1\",\"2\",\"3\",..: 5 6 4 6 6 6 5 5 5 4 ...\n $ Justificacion_2 : Factor w/ 57 levels \"\",\"al saber sus propiedades puedo utilizarlo.\",..: 41 33 54 34 14 20 46 29 49 1 ...\n $ Q16 : Factor w/ 24 levels \" \",\" Otra::no lo se\",..: 18 4 15 8 14 14 12 12 17 4 ...\n $ Q17 : Factor w/ 6 levels \"\",\"1\",\"2\",\"3\",..: 6 2 4 4 6 4 5 4 5 4 ...\n $ Justificacion_3 : Factor w/ 57 levels \"\",\"5. Tiene q tener algun efecto. Pero no creo q curativo\",..: 55 36 27 20 52 42 12 1 13 1 ...\n $ hora_inicio : POSIXct, format: \"2020-11-08 11:01:00\" \"2020-11-08 11:22:00\" ...\n $ hora_final : POSIXct, format: \"2020-11-08 11:05:00\" \"2020-11-08 11:25:00\" ...\n $ tiempo_terminar : 'difftime' num 4 3 6 9 ...\n ..- attr(*, \"units\")= chr \"mins\"\n'data.frame':\t2 obs. 
of 33 variables:\n $ Id : Factor w/ 100 levels \"1\",\"10\",\"100\",..: 44 60\n $ Ip : Factor w/ 93 levels \"143.208.57.62\",..: 53 71\n $ Acceso : Factor w/ 1 level \"Public Access\": 1 1\n $ Fecha_inicio : Date, format: \"2020-09-21\" \"2020-09-21\"\n $ Fecha_finalizacion: Date, format: \"2020-09-21\" \"2020-09-21\"\n $ Procedencia : Factor w/ 2 levels \"Casco rural\",..: 1 2\n $ Municipio : Factor w/ 18 levels \"Amatitlan\",\"Asunción Mita\",..: 16 8\n $ Edad : Factor w/ 3 levels \"15-35 años\",\"36-55 años\",..: 1 3\n $ Sexo : Factor w/ 2 levels \"Femenino\",\"Masculino\": 2 1\n $ Q1 : Factor w/ 61 levels \"\",\"\\\"Mata ratón\\\", se cocina y con esa agua se da un baño para bajar la fiebre\",..: 38 45\n $ Q2 : Factor w/ 2 levels \"No\",\"Si\": 1 1\n $ Q3 : Factor w/ 3 levels \"\",\"No\",\"Si\": 2 2\n $ Q4 : Factor w/ 6 levels \"\",\"1\",\"2\",\"3\",..: 4 2\n $ Justificacion_1 : Factor w/ 29 levels \"\",\"Conozco que tiene propiedades positivas pero no sé cuán efectivas sean en las personas\",..: 28 16\n $ He leido : Factor w/ 2 levels \"\",\"si\": 1 1\n $ Q5 : Factor w/ 4 levels \"\",\"Bija\",\"No\",..: 1 1\n $ Q6 : Factor w/ 12 levels \" \",\" Medicinal \",..: 1 1\n $ Q7 : Factor w/ 23 levels \" \",\" No conozco ninguna aplicación del achiote \",..: 1 1\n $ Q8 : Factor w/ 5 levels \"\",\"Mensualmente\",..: 1 1\n $ Q9 : Factor w/ 12 levels \" \",\" No obtengo achiote \",..: 1 1\n $ Q10 : Factor w/ 3 levels \"\",\"No\",\"Si\": 1 1\n $ Q11 : Factor w/ 8 levels \"\",\"Flores\",\"Fruto\",..: 1 1\n $ Q12 : Factor w/ 7 levels \"\",\"Azafrán\",\"Cúrcuma\",..: 1 1\n $ Q13 : Factor w/ 31 levels \"\",\"Achiote\",\"Achiote, remolacha, tomate\",..: 1 1\n $ Q14 : Factor w/ 23 levels \" \",\" No conozco ninguna preparación \",..: 1 1\n $ Q15 : Factor w/ 6 levels \"\",\"1\",\"2\",\"3\",..: 1 1\n $ Justificacion_2 : Factor w/ 57 levels \"\",\"al saber sus propiedades puedo utilizarlo.\",..: 1 1\n $ Q16 : Factor w/ 24 levels \" \",\" Otra::no lo se\",..: 1 1\n $ Q17 : Factor w/ 6 levels \"\",\"1\",\"2\",\"3\",..: 1 1\n $ Justificacion_3 : Factor w/ 57 levels \"\",\"5. Tiene q tener algun efecto. 
Pero no creo q curativo\",..: 1 1\n $ hora_inicio : POSIXct, format: \"2020-11-08 14:31:00\" \"2020-11-08 18:53:00\"\n $ hora_final : POSIXct, format: \"2020-11-08 14:33:00\" \"2020-11-08 18:59:00\"\n $ tiempo_terminar : 'difftime' num 2 6\n ..- attr(*, \"units\")= chr \"mins\"\n" ] ], [ [ "### Division variables cuantitativas y cualitativas", "_____no_output_____" ] ], [ [ "conocedores_cuanti <- conocedores[c(\"Procedencia\", \"Edad\", \"Sexo\", \"Q5\", \"Q8\", \"Q10\", \"Q11\", \"Q15\", \"Q17\")]\nconocedores_cuali <- conocedores[c(\"Municipio\", \"Q6\", \"Q7\", \"Q9\", \"Q12\", \"Q13\", \"Q14\", \"Q16\")]\n\nconocedores_cuanti$Procedencia <- as.numeric(conocedores_cuanti$Procedencia)\nconocedores_cuanti$Edad <- as.numeric(conocedores_cuanti$Edad)\nconocedores_cuanti$Sexo <- as.numeric(conocedores_cuanti$Sexo)\nconocedores_cuanti$Q5 <- as.numeric(conocedores_cuanti$Q5)\nconocedores_cuanti$Q8 <- as.numeric(conocedores_cuanti$Q8)\nconocedores_cuanti$Q10 <- as.numeric(conocedores_cuanti$Q10)\nconocedores_cuanti$Q11 <- as.numeric(conocedores_cuanti$Q11)\n\ncorr_cono<- cor(data.matrix(conocedores_cuanti), method = c(\"pearson\", \"kendall\", \"spearman\"))\ncorr_cono\ncorrplot(corr_cono, type = \"upper\", order = \"hclust\", \n tl.col = \"black\", tl.srt = 45)\n\ncono_cuanti <- conocedores_cuanti", "_____no_output_____" ] ], [ [ "### Clustering", "_____no_output_____" ] ], [ [ "conocedores_cuanti <- data.matrix(conocedores_cuanti)\nconocedores_cuanti <- na.omit(conocedores_cuanti) # listwise deletion of missing\nconocedores_cuanti <- scale(conocedores_cuanti) # standardize variables\n\n#Cantidad de clusters\nwss <- (nrow(conocedores_cuanti)-1)*sum(apply(conocedores_cuanti,2,var))\nfor (i in 2:15) wss[i] <- sum(kmeans(conocedores_cuanti,\n centers=i)$withinss)\nplot(1:15, wss, type=\"b\", xlab=\"Number of Clusters\",\n ylab=\"Within groups sum of squares\")\n\n## K-Means Cluster Analysis\nfit <- kmeans(conocedores_cuanti, 4) # 5 cluster solution\n# get cluster means\naggregate(conocedores_cuanti,by=list(fit$cluster),FUN=mean)\n", "_____no_output_____" ], [ "#Plot del cluster\nclusplot(conocedores_cuanti, fit$cluster, color=TRUE, shade=TRUE,\n labels=2, lines=0)\n\n# append cluster assignment\ncono_cuanti <- data.frame(cono_cuanti, fit$cluster)\n", "_____no_output_____" ], [ "fit", "_____no_output_____" ], [ "B <- split(cono_cuanti, cono_cuanti$fit.cluster)\ngroup_1 <- B$\"1\"\ntemp <- as.character(group_1$Q17)\ninfo <- table(temp)\nprint(\"1 - No produce ningun efecto\")\nprint(\"2 - El efecto no es notable\")\nprint(\"3 - Levemente efectivo\")\nprint(\"4 - Efecto notable\")\nprint(\"5 - Efecto significativo\")\nxx <- barplot(info,main=\"¿Qué tan efectivo cree son los tratamientos alternativos de achiote?\", col=color.ramp)\ninfo", "[1] \"1 - No produce ningun efecto\"\n[1] \"2 - El efecto no es notable\"\n[1] \"3 - Levemente efectivo\"\n[1] \"4 - Efecto notable\"\n[1] \"5 - Efecto significativo\"\n" ], [ "fit$cluster", "_____no_output_____" ] ], [ [ "### Division rango edad", "_____no_output_____" ] ], [ [ "Y <- split(achiote, achiote$Edad)\nquince35 <- Y$\"15-35\"\ntresseis55 <- Y$\"36-55\"\ncincoseis75 <- Y$\"56-75\"", "_____no_output_____" ], [ "str(tresseis55)", "'data.frame':\t4 obs. 
of 33 variables:\n $ Id : Factor w/ 100 levels \"1\",\"10\",\"100\",..: 5 19 43 61\n $ Ip : Factor w/ 93 levels \"143.208.57.62\",..: 46 41 59 29\n $ Acceso : Factor w/ 1 level \"Public Access\": 1 1 1 1\n $ Fecha_inicio : Date, format: \"2020-09-18\" \"2020-09-18\" ...\n $ Fecha_finalizacion: Date, format: \"2020-09-18\" \"2020-09-18\" ...\n $ Procedencia : Factor w/ 2 levels \"Casco rural\",..: 2 2 2 2\n $ Municipio : Factor w/ 18 levels \"Amatitlan\",\"Asunción Mita\",..: 8 17 8 8\n $ Edad : Factor w/ 3 levels \"15-35 años\",\"36-55 años\",..: 2 2 2 2\n $ Sexo : Factor w/ 2 levels \"Femenino\",\"Masculino\": 1 1 1 1\n $ Q1 : Factor w/ 61 levels \"\",\"\\\"Mata ratón\\\", se cocina y con esa agua se da un baño para bajar la fiebre\",..: 58 38 38 27\n $ Q2 : Factor w/ 2 levels \"No\",\"Si\": 2 2 2 2\n $ Q3 : Factor w/ 3 levels \"\",\"No\",\"Si\": 1 1 1 1\n $ Q4 : Factor w/ 6 levels \"\",\"1\",\"2\",\"3\",..: 1 1 1 1\n $ Justificacion_1 : Factor w/ 29 levels \"\",\"Conozco que tiene propiedades positivas pero no sé cuán efectivas sean en las personas\",..: 1 1 1 1\n $ He leido : Factor w/ 2 levels \"\",\"si\": 1 1 1 1\n $ Q5 : Factor w/ 4 levels \"\",\"Bija\",\"No\",..: 3 3 3 3\n $ Q6 : Factor w/ 12 levels \" \",\" Medicinal \",..: 3 3 3 2\n $ Q7 : Factor w/ 23 levels \" \",\" No conozco ninguna aplicación del achiote \",..: 3 3 3 3\n $ Q8 : Factor w/ 5 levels \"\",\"Mensualmente\",..: 4 5 5 4\n $ Q9 : Factor w/ 12 levels \" \",\" No obtengo achiote \",..: 11 11 9 9\n $ Q10 : Factor w/ 3 levels \"\",\"No\",\"Si\": 3 3 2 2\n $ Q11 : Factor w/ 8 levels \"\",\"Flores\",\"Fruto\",..: 7 5 3 5\n $ Q12 : Factor w/ 7 levels \"\",\"Azafrán\",\"Cúrcuma\",..: 6 2 2 2\n $ Q13 : Factor w/ 31 levels \"\",\"Achiote\",\"Achiote, remolacha, tomate\",..: 2 5 1 19\n $ Q14 : Factor w/ 23 levels \" \",\" No conozco ninguna preparación \",..: 2 2 2 2\n $ Q15 : Factor w/ 6 levels \"\",\"1\",\"2\",\"3\",..: 5 4 3 5\n $ Justificacion_2 : Factor w/ 57 levels \"\",\"al saber sus propiedades puedo utilizarlo.\",..: 46 22 1 50\n $ Q16 : Factor w/ 24 levels \" \",\" Otra::no lo se\",..: 12 2 16 14\n $ Q17 : Factor w/ 6 levels \"\",\"1\",\"2\",\"3\",..: 5 2 4 4\n $ Justificacion_3 : Factor w/ 57 levels \"\",\"5. Tiene q tener algun efecto. 
Pero no creo q curativo\",..: 12 35 1 29\n $ hora_inicio : POSIXct, format: \"2020-11-08 11:40:00\" \"2020-11-08 12:30:00\" ...\n $ hora_final : POSIXct, format: \"2020-11-08 11:53:00\" \"2020-11-08 12:34:00\" ...\n $ tiempo_terminar : 'difftime' num 13 4 4 15\n ..- attr(*, \"units\")= chr \"mins\"\n" ] ], [ [ "### 15 - 35 años", "_____no_output_____" ] ], [ [ "text <- toupper(as.character(quince35$Q7))\ntext <- chartr(\"ÁÉÍÓÚ\", \"AEIOU\", text)\ntext_df <- tibble(text = text)\n\ncustom_stop_words <- bind_rows(stop_words,\n tibble(word = c(tm::stopwords(\"spanish\"), \"quita\", \"aumenta\", \"materna\", \"flujo\", \"achiote\", \"aplicacion\", \"conozco\", \"consumo\", \"control\", \"solar\", \"mosquitos\"),\n lexicon = \"custom\"))\n\ntext_df <- text_df %>% \n unnest_tokens(word, text) %>%\n anti_join(custom_stop_words) %>%\n count(word, sort = TRUE)\n\ntext_df %>%\n filter(n > 1) %>%\n mutate(word = reorder(word, n)) %>%\n ggplot(aes(word, n)) +\n geom_col() +\n geom_bar(stat=\"identity\", fill = \"#9299DE\") +\n labs(x = \"Aplicaciones\", y = \"Ocurrencias\") +\n coord_flip()\n\ntext_df", "Joining, by = \"word\"\n\n" ], [ "info <- table(quince35$Q10)\nxx <- barplot(info,main=\"¿conseguir achiote en los centro de conveniencia en especias?\", col=color.ramp, cex.lab=2)\ninfo", "_____no_output_____" ], [ "info <- table(quince35$Q11)\nxx <- barplot(info,\n main=\"qué parte se utiliza para obtener el polvo rojo comúnmente comercializado\",\n col=color.ramp,\n cex.lab=2)\ninfo", "_____no_output_____" ] ], [ [ "### 36 -55", "_____no_output_____" ] ], [ [ "str(tresseis55)", " NULL\n" ], [ "text <- toupper(as.character(tresseis55$Q7))\ntext <- chartr(\"ÁÉÍÓÚ\", \"AEIOU\", text)\ntext_df <- tibble(text = text)\n\ncustom_stop_words <- bind_rows(stop_words,\n tibble(word = c(tm::stopwords(\"spanish\"), \"quita\", \"aumenta\", \"materna\", \"flujo\", \"achiote\", \"aplicacion\", \"conozco\", \"consumo\", \"control\", \"solar\", \"mosquitos\"),\n lexicon = \"custom\"))\n\ntext_df <- text_df %>% \n unnest_tokens(word, text) %>%\n anti_join(custom_stop_words) %>%\n count(word, sort = TRUE)\n\ntext_df %>%\n filter(n > 1) %>%\n mutate(word = reorder(word, n)) %>%\n ggplot(aes(word, n)) +\n geom_col() +\n geom_bar(stat=\"identity\", fill = \"#9299DE\") +\n labs(x = \"Aplicaciones\", y = \"Ocurrencias\") +\n coord_flip()\n\ntext_df", "Joining, by = \"word\"\n\n" ], [ "info <- table(tresseis55$Q10)\nxx <- barplot(info,main=\"¿conseguir achiote en los centro de conveniencia en especias?\", col=color.ramp, cex.lab=2)\ninfo", "_____no_output_____" ], [ "info <- table(tresseis55$Q11)\nxx <- barplot(info,\n main=\"qué parte se utiliza para obtener el polvo rojo comúnmente comercializado\",\n col=color.ramp,\n cex.lab=2)\ninfo", "_____no_output_____" ] ], [ [ "### 56 - 75", "_____no_output_____" ] ], [ [ "text <- toupper(as.character(cincoseis75$Q7))\ntext <- chartr(\"ÁÉÍÓÚ\", \"AEIOU\", text)\ntext_df <- tibble(text = text)\n\ncustom_stop_words <- bind_rows(stop_words,\n tibble(word = c(tm::stopwords(\"spanish\"), \"quita\", \"aumenta\", \"materna\", \"flujo\", \"achiote\", \"aplicacion\", \"conozco\", \"consumo\", \"control\", \"solar\", \"mosquitos\"),\n lexicon = \"custom\"))\n\ntext_df <- text_df %>% \n unnest_tokens(word, text) %>%\n anti_join(custom_stop_words) %>%\n count(word, sort = TRUE)\n\ntext_df %>%\n filter(n > 1) %>%\n mutate(word = reorder(word, n)) %>%\n ggplot(aes(word, n)) +\n geom_col() +\n geom_bar(stat=\"identity\", fill = \"#9299DE\") +\n labs(x = \"Aplicaciones\", y = \"Ocurrencias\") +\n 
coord_flip()\n\ntext_df", "Joining, by = \"word\"\n\n" ], [ "info <- table(cincoseis75$Q10)\nxx <- barplot(info,main=\"¿conseguir achiote en los centro de conveniencia en especias?\", col=color.ramp, cex.lab=2)\ninfo", "_____no_output_____" ], [ "info <- table(cincoseis75$Q11)\nxx <- barplot(info,\n main=\"qué parte se utiliza para obtener el polvo rojo comúnmente comercializado\",\n col=color.ramp,\n cex.lab=2)\ninfo", "_____no_output_____" ] ], [ [ "### correlación de variables", "_____no_output_____" ] ], [ [ "colnames(achiote)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
4a7be97589beae51811c13529594be64dc0750a2
919
ipynb
Jupyter Notebook
notebooks/book1/08/smooth-vs-nonsmooth-1d.ipynb
patel-zeel/pyprobml
027ef3c13a2a63d958e05fdedb68fd7b8f0e0261
[ "MIT" ]
null
null
null
notebooks/book1/08/smooth-vs-nonsmooth-1d.ipynb
patel-zeel/pyprobml
027ef3c13a2a63d958e05fdedb68fd7b8f0e0261
[ "MIT" ]
1
2022-03-27T04:59:50.000Z
2022-03-27T04:59:50.000Z
notebooks/book1/08/smooth-vs-nonsmooth-1d.ipynb
patel-zeel/pyprobml
027ef3c13a2a63d958e05fdedb68fd7b8f0e0261
[ "MIT" ]
2
2022-03-26T11:52:36.000Z
2022-03-27T05:17:48.000Z
22.975
76
0.501632
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
4a7bf26c2403685312abdb3a6aa520531c41eb65
115,907
ipynb
Jupyter Notebook
figures/repeats/logistic.ipynb
kose-y/MendelIHT.jl
9d414dd71dc554e3410affd61ba1216d90108373
[ "MIT" ]
10
2019-11-20T09:46:16.000Z
2022-02-19T17:32:07.000Z
figures/repeats/logistic.ipynb
kose-y/MendelIHT.jl
9d414dd71dc554e3410affd61ba1216d90108373
[ "MIT" ]
13
2019-10-16T20:51:44.000Z
2022-03-30T19:32:12.000Z
figures/repeats/logistic.ipynb
kose-y/MendelIHT.jl
9d414dd71dc554e3410affd61ba1216d90108373
[ "MIT" ]
2
2022-02-15T17:21:19.000Z
2022-02-16T08:15:23.000Z
30.558133
1,126
0.499116
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
4a7bfada844305f8adc6761087a03deeef9c8fc3
588,316
ipynb
Jupyter Notebook
06-week/05_09_Principal_Component_Analysis.ipynb
lincolnvs/Aceleradev-DataScience
42a7bc79ac56699d6233933c75c45235786bacf4
[ "MIT" ]
3
2019-09-02T16:26:27.000Z
2020-05-27T22:43:39.000Z
06-week/05_09_Principal_Component_Analysis.ipynb
lincolnvs/Aceleradev-DataScience
42a7bc79ac56699d6233933c75c45235786bacf4
[ "MIT" ]
null
null
null
06-week/05_09_Principal_Component_Analysis.ipynb
lincolnvs/Aceleradev-DataScience
42a7bc79ac56699d6233933c75c45235786bacf4
[ "MIT" ]
3
2019-11-09T19:35:44.000Z
2020-06-13T22:34:57.000Z
457.833463
183,366
0.921879
[ [ [ "<!--BOOK_INFORMATION-->\n<img align=\"left\" style=\"padding-right:10px;\" src=\"https://github.com/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/figures/PDSH-cover-small.png?raw=1\">\n\n*This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).*\n\n*The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!*", "_____no_output_____" ], [ "<!--NAVIGATION-->\n< [In-Depth: Decision Trees and Random Forests](05.08-Random-Forests.ipynb) | [Contents](Index.ipynb) | [In-Depth: Manifold Learning](05.10-Manifold-Learning.ipynb) >\n\n<a href=\"https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/05.09-Principal-Component-Analysis.ipynb\"><img align=\"left\" src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open in Colab\" title=\"Open and Execute in Google Colaboratory\"></a>\n", "_____no_output_____" ], [ "# In Depth: Principal Component Analysis", "_____no_output_____" ], [ "Up until now, we have been looking in depth at supervised learning estimators: those estimators that predict labels based on labeled training data.\nHere we begin looking at several unsupervised estimators, which can highlight interesting aspects of the data without reference to any known labels.\n\nIn this section, we explore what is perhaps one of the most broadly used of unsupervised algorithms, principal component analysis (PCA).\nPCA is fundamentally a dimensionality reduction algorithm, but it can also be useful as a tool for visualization, for noise filtering, for feature extraction and engineering, and much more.\nAfter a brief conceptual discussion of the PCA algorithm, we will see a couple examples of these further applications.\n\nWe begin with the standard imports:", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns; sns.set()", "_____no_output_____" ] ], [ [ "## Introducing Principal Component Analysis\n\nPrincipal component analysis is a fast and flexible unsupervised method for dimensionality reduction in data, which we saw briefly in [Introducing Scikit-Learn](05.02-Introducing-Scikit-Learn.ipynb).\nIts behavior is easiest to visualize by looking at a two-dimensional dataset.\nConsider the following 200 points:", "_____no_output_____" ] ], [ [ "rng = np.random.RandomState(1)\nX = np.dot(rng.rand(2, 2), rng.randn(2, 200)).T\nplt.scatter(X[:, 0], X[:, 1])\nplt.axis('equal');", "_____no_output_____" ] ], [ [ "By eye, it is clear that there is a nearly linear relationship between the x and y variables.\nThis is reminiscent of the linear regression data we explored in [In Depth: Linear Regression](05.06-Linear-Regression.ipynb), but the problem setting here is slightly different: rather than attempting to *predict* the y values from the x values, the unsupervised learning problem attempts to learn about the *relationship* between the x and y values.\n\nIn principal component analysis, this relationship is quantified by finding a list of the *principal axes* in the data, and using those axes to describe the 
dataset.\nUsing Scikit-Learn's ``PCA`` estimator, we can compute this as follows:", "_____no_output_____" ] ], [ [ "from sklearn.decomposition import PCA\npca = PCA(n_components=2)\npca.fit(X)", "_____no_output_____" ] ], [ [ "The fit learns some quantities from the data, most importantly the \"components\" and \"explained variance\":", "_____no_output_____" ] ], [ [ "print(pca.components_)", "[[ 0.94446029 0.32862557]\n [ 0.32862557 -0.94446029]]\n" ], [ "print(pca.explained_variance_)", "[ 0.75871884 0.01838551]\n" ] ], [ [ "To see what these numbers mean, let's visualize them as vectors over the input data, using the \"components\" to define the direction of the vector, and the \"explained variance\" to define the squared-length of the vector:", "_____no_output_____" ] ], [ [ "def draw_vector(v0, v1, ax=None):\n ax = ax or plt.gca()\n arrowprops=dict(arrowstyle='->',\n linewidth=2,\n shrinkA=0, shrinkB=0)\n ax.annotate('', v1, v0, arrowprops=arrowprops)\n\n# plot data\nplt.scatter(X[:, 0], X[:, 1], alpha=0.2)\nfor length, vector in zip(pca.explained_variance_, pca.components_):\n v = vector * 3 * np.sqrt(length)\n draw_vector(pca.mean_, pca.mean_ + v)\nplt.axis('equal');", "_____no_output_____" ] ], [ [ "These vectors represent the *principal axes* of the data, and the length of the vector is an indication of how \"important\" that axis is in describing the distribution of the data—more precisely, it is a measure of the variance of the data when projected onto that axis.\nThe projection of each data point onto the principal axes are the \"principal components\" of the data.\n\nIf we plot these principal components beside the original data, we see the plots shown here:", "_____no_output_____" ], [ "![](https://github.com/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/figures/05.09-PCA-rotation.png?raw=1)\n[figure source in Appendix](06.00-Figure-Code.ipynb#Principal-Components-Rotation)", "_____no_output_____" ], [ "This transformation from data axes to principal axes is an *affine transformation*, which basically means it is composed of a translation, rotation, and uniform scaling.\n\nWhile this algorithm to find principal components may seem like just a mathematical curiosity, it turns out to have very far-reaching applications in the world of machine learning and data exploration.", "_____no_output_____" ], [ "### PCA as dimensionality reduction\n\nUsing PCA for dimensionality reduction involves zeroing out one or more of the smallest principal components, resulting in a lower-dimensional projection of the data that preserves the maximal data variance.\n\nHere is an example of using PCA as a dimensionality reduction transform:", "_____no_output_____" ] ], [ [ "pca = PCA(n_components=1)\npca.fit(X)\nX_pca = pca.transform(X)\nprint(\"original shape: \", X.shape)\nprint(\"transformed shape:\", X_pca.shape)", "original shape: (200, 2)\ntransformed shape: (200, 1)\n" ] ], [ [ "The transformed data has been reduced to a single dimension.\nTo understand the effect of this dimensionality reduction, we can perform the inverse transform of this reduced data and plot it along with the original data:", "_____no_output_____" ] ], [ [ "X_new = pca.inverse_transform(X_pca)\nplt.scatter(X[:, 0], X[:, 1], alpha=0.2)\nplt.scatter(X_new[:, 0], X_new[:, 1], alpha=0.8)\nplt.axis('equal');", "_____no_output_____" ] ], [ [ "The light points are the original data, while the dark points are the projected version.\nThis makes clear what a PCA dimensionality reduction means: the information along the 
least important principal axis or axes is removed, leaving only the component(s) of the data with the highest variance.\nThe fraction of variance that is cut out (proportional to the spread of points about the line formed in this figure) is roughly a measure of how much \"information\" is discarded in this reduction of dimensionality.\n\nThis reduced-dimension dataset is in some senses \"good enough\" to encode the most important relationships between the points: despite reducing the dimension of the data by 50%, the overall relationship between the data points are mostly preserved.", "_____no_output_____" ], [ "### PCA for visualization: Hand-written digits\n\nThe usefulness of the dimensionality reduction may not be entirely apparent in only two dimensions, but becomes much more clear when looking at high-dimensional data.\nTo see this, let's take a quick look at the application of PCA to the digits data we saw in [In-Depth: Decision Trees and Random Forests](05.08-Random-Forests.ipynb).\n\nWe start by loading the data:", "_____no_output_____" ] ], [ [ "from sklearn.datasets import load_digits\ndigits = load_digits()\ndigits.data.shape", "_____no_output_____" ] ], [ [ "Recall that the data consists of 8×8 pixel images, meaning that they are 64-dimensional.\nTo gain some intuition into the relationships between these points, we can use PCA to project them to a more manageable number of dimensions, say two:", "_____no_output_____" ] ], [ [ "pca = PCA(2) # project from 64 to 2 dimensions\nprojected = pca.fit_transform(digits.data)\nprint(digits.data.shape)\nprint(projected.shape)", "(1797, 64)\n(1797, 2)\n" ] ], [ [ "We can now plot the first two principal components of each point to learn about the data:", "_____no_output_____" ] ], [ [ "plt.scatter(projected[:, 0], projected[:, 1],\n c=digits.target, edgecolor='none', alpha=0.5,\n cmap=plt.cm.get_cmap('spectral', 10))\nplt.xlabel('component 1')\nplt.ylabel('component 2')\nplt.colorbar();", "_____no_output_____" ] ], [ [ "Recall what these components mean: the full data is a 64-dimensional point cloud, and these points are the projection of each data point along the directions with the largest variance.\nEssentially, we have found the optimal stretch and rotation in 64-dimensional space that allows us to see the layout of the digits in two dimensions, and have done this in an unsupervised manner—that is, without reference to the labels.", "_____no_output_____" ], [ "### What do the components mean?\n\nWe can go a bit further here, and begin to ask what the reduced dimensions *mean*.\nThis meaning can be understood in terms of combinations of basis vectors.\nFor example, each image in the training set is defined by a collection of 64 pixel values, which we will call the vector $x$:\n\n$$\nx = [x_1, x_2, x_3 \\cdots x_{64}]\n$$\n\nOne way we can think about this is in terms of a pixel basis.\nThat is, to construct the image, we multiply each element of the vector by the pixel it describes, and then add the results together to build the image:\n\n$$\n{\\rm image}(x) = x_1 \\cdot{\\rm (pixel~1)} + x_2 \\cdot{\\rm (pixel~2)} + x_3 \\cdot{\\rm (pixel~3)} \\cdots x_{64} \\cdot{\\rm (pixel~64)}\n$$\n\nOne way we might imagine reducing the dimension of this data is to zero out all but a few of these basis vectors.\nFor example, if we use only the first eight pixels, we get an eight-dimensional projection of the data, but it is not very reflective of the whole image: we've thrown out nearly 90% of the pixels!", "_____no_output_____" ], [ 
"![](https://github.com/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/figures/05.09-digits-pixel-components.png?raw=1)\n[figure source in Appendix](06.00-Figure-Code.ipynb#Digits-Pixel-Components)", "_____no_output_____" ], [ "The upper row of panels shows the individual pixels, and the lower row shows the cumulative contribution of these pixels to the construction of the image.\nUsing only eight of the pixel-basis components, we can only construct a small portion of the 64-pixel image.\nWere we to continue this sequence and use all 64 pixels, we would recover the original image.", "_____no_output_____" ], [ "But the pixel-wise representation is not the only choice of basis. We can also use other basis functions, which each contain some pre-defined contribution from each pixel, and write something like\n\n$$\nimage(x) = {\\rm mean} + x_1 \\cdot{\\rm (basis~1)} + x_2 \\cdot{\\rm (basis~2)} + x_3 \\cdot{\\rm (basis~3)} \\cdots\n$$\n\nPCA can be thought of as a process of choosing optimal basis functions, such that adding together just the first few of them is enough to suitably reconstruct the bulk of the elements in the dataset.\nThe principal components, which act as the low-dimensional representation of our data, are simply the coefficients that multiply each of the elements in this series.\nThis figure shows a similar depiction of reconstructing this digit using the mean plus the first eight PCA basis functions:", "_____no_output_____" ], [ "![](https://github.com/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/figures/05.09-digits-pca-components.png?raw=1)\n[figure source in Appendix](06.00-Figure-Code.ipynb#Digits-PCA-Components)", "_____no_output_____" ], [ "Unlike the pixel basis, the PCA basis allows us to recover the salient features of the input image with just a mean plus eight components!\nThe amount of each pixel in each component is the corollary of the orientation of the vector in our two-dimensional example.\nThis is the sense in which PCA provides a low-dimensional representation of the data: it discovers a set of basis functions that are more efficient than the native pixel-basis of the input data.", "_____no_output_____" ], [ "### Choosing the number of components\n\nA vital part of using PCA in practice is the ability to estimate how many components are needed to describe the data.\nThis can be determined by looking at the cumulative *explained variance ratio* as a function of the number of components:", "_____no_output_____" ] ], [ [ "pca = PCA().fit(digits.data)\nplt.plot(np.cumsum(pca.explained_variance_ratio_))\nplt.xlabel('number of components')\nplt.ylabel('cumulative explained variance');", "_____no_output_____" ] ], [ [ "This curve quantifies how much of the total, 64-dimensional variance is contained within the first $N$ components.\nFor example, we see that with the digits the first 10 components contain approximately 75% of the variance, while you need around 50 components to describe close to 100% of the variance.\n\nHere we see that our two-dimensional projection loses a lot of information (as measured by the explained variance) and that we'd need about 20 components to retain 90% of the variance. 
Looking at this plot for a high-dimensional dataset can help you understand the level of redundancy present in multiple observations.", "_____no_output_____" ], [ "## PCA as Noise Filtering\n\nPCA can also be used as a filtering approach for noisy data.\nThe idea is this: any components with variance much larger than the effect of the noise should be relatively unaffected by the noise.\nSo if you reconstruct the data using just the largest subset of principal components, you should be preferentially keeping the signal and throwing out the noise.\n\nLet's see how this looks with the digits data.\nFirst we will plot several of the input noise-free data:", "_____no_output_____" ] ], [ [ "def plot_digits(data):\n fig, axes = plt.subplots(4, 10, figsize=(10, 4),\n subplot_kw={'xticks':[], 'yticks':[]},\n gridspec_kw=dict(hspace=0.1, wspace=0.1))\n for i, ax in enumerate(axes.flat):\n ax.imshow(data[i].reshape(8, 8),\n cmap='binary', interpolation='nearest',\n clim=(0, 16))\nplot_digits(digits.data)", "_____no_output_____" ] ], [ [ "Now lets add some random noise to create a noisy dataset, and re-plot it:", "_____no_output_____" ] ], [ [ "np.random.seed(42)\nnoisy = np.random.normal(digits.data, 4)\nplot_digits(noisy)", "_____no_output_____" ] ], [ [ "It's clear by eye that the images are noisy, and contain spurious pixels.\nLet's train a PCA on the noisy data, requesting that the projection preserve 50% of the variance:", "_____no_output_____" ] ], [ [ "pca = PCA(0.50).fit(noisy)\npca.n_components_", "_____no_output_____" ] ], [ [ "Here 50% of the variance amounts to 12 principal components.\nNow we compute these components, and then use the inverse of the transform to reconstruct the filtered digits:", "_____no_output_____" ] ], [ [ "components = pca.transform(noisy)\nfiltered = pca.inverse_transform(components)\nplot_digits(filtered)", "_____no_output_____" ] ], [ [ "This signal preserving/noise filtering property makes PCA a very useful feature selection routine—for example, rather than training a classifier on very high-dimensional data, you might instead train the classifier on the lower-dimensional representation, which will automatically serve to filter out random noise in the inputs.", "_____no_output_____" ], [ "## Example: Eigenfaces\n\nEarlier we explored an example of using a PCA projection as a feature selector for facial recognition with a support vector machine (see [In-Depth: Support Vector Machines](05.07-Support-Vector-Machines.ipynb)).\nHere we will take a look back and explore a bit more of what went into that.\nRecall that we were using the Labeled Faces in the Wild dataset made available through Scikit-Learn:", "_____no_output_____" ] ], [ [ "from sklearn.datasets import fetch_lfw_people\nfaces = fetch_lfw_people(min_faces_per_person=60)\nprint(faces.target_names)\nprint(faces.images.shape)", "['Ariel Sharon' 'Colin Powell' 'Donald Rumsfeld' 'George W Bush'\n 'Gerhard Schroeder' 'Hugo Chavez' 'Junichiro Koizumi' 'Tony Blair']\n(1348, 62, 47)\n" ] ], [ [ "Let's take a look at the principal axes that span this dataset.\nBecause this is a large dataset, we will use ``RandomizedPCA``—it contains a randomized method to approximate the first $N$ principal components much more quickly than the standard ``PCA`` estimator, and thus is very useful for high-dimensional data (here, a dimensionality of nearly 3,000).\nWe will take a look at the first 150 components:", "_____no_output_____" ] ], [ [ "from sklearn.decomposition import RandomizedPCA\npca = 
RandomizedPCA(150)\npca.fit(faces.data)", "_____no_output_____" ] ], [ [ "In this case, it can be interesting to visualize the images associated with the first several principal components (these components are technically known as \"eigenvectors,\"\nso these types of images are often called \"eigenfaces\").\nAs you can see in this figure, they are as creepy as they sound:", "_____no_output_____" ] ], [ [ "fig, axes = plt.subplots(3, 8, figsize=(9, 4),\n subplot_kw={'xticks':[], 'yticks':[]},\n gridspec_kw=dict(hspace=0.1, wspace=0.1))\nfor i, ax in enumerate(axes.flat):\n ax.imshow(pca.components_[i].reshape(62, 47), cmap='bone')", "_____no_output_____" ] ], [ [ "The results are very interesting, and give us insight into how the images vary: for example, the first few eigenfaces (from the top left) seem to be associated with the angle of lighting on the face, and later principal vectors seem to be picking out certain features, such as eyes, noses, and lips.\nLet's take a look at the cumulative variance of these components to see how much of the data information the projection is preserving:", "_____no_output_____" ] ], [ [ "plt.plot(np.cumsum(pca.explained_variance_ratio_))\nplt.xlabel('number of components')\nplt.ylabel('cumulative explained variance');", "_____no_output_____" ] ], [ [ "We see that these 150 components account for just over 90% of the variance.\nThat would lead us to believe that using these 150 components, we would recover most of the essential characteristics of the data.\nTo make this more concrete, we can compare the input images with the images reconstructed from these 150 components:", "_____no_output_____" ] ], [ [ "# Compute the components and projected faces\npca = RandomizedPCA(150).fit(faces.data)\ncomponents = pca.transform(faces.data)\nprojected = pca.inverse_transform(components)", "_____no_output_____" ], [ "# Plot the results\nfig, ax = plt.subplots(2, 10, figsize=(10, 2.5),\n subplot_kw={'xticks':[], 'yticks':[]},\n gridspec_kw=dict(hspace=0.1, wspace=0.1))\nfor i in range(10):\n ax[0, i].imshow(faces.data[i].reshape(62, 47), cmap='binary_r')\n ax[1, i].imshow(projected[i].reshape(62, 47), cmap='binary_r')\n \nax[0, 0].set_ylabel('full-dim\\ninput')\nax[1, 0].set_ylabel('150-dim\\nreconstruction');", "_____no_output_____" ] ], [ [ "The top row here shows the input images, while the bottom row shows the reconstruction of the images from just 150 of the ~3,000 initial features.\nThis visualization makes clear why the PCA feature selection used in [In-Depth: Support Vector Machines](05.07-Support-Vector-Machines.ipynb) was so successful: although it reduces the dimensionality of the data by nearly a factor of 20, the projected images contain enough information that we might, by eye, recognize the individuals in the image.\nWhat this means is that our classification algorithm needs to be trained on 150-dimensional data rather than 3,000-dimensional data, which depending on the particular algorithm we choose, can lead to a much more efficient classification.", "_____no_output_____" ], [ "## Principal Component Analysis Summary\n\nIn this section we have discussed the use of principal component analysis for dimensionality reduction, for visualization of high-dimensional data, for noise filtering, and for feature selection within high-dimensional data.\nBecause of the versatility and interpretability of PCA, it has been shown to be effective in a wide variety of contexts and disciplines.\nGiven any high-dimensional dataset, I tend to start with PCA in order to 
visualize the relationship between points (as we did with the digits), to understand the main variance in the data (as we did with the eigenfaces), and to understand the intrinsic dimensionality (by plotting the explained variance ratio).\nCertainly PCA is not useful for every high-dimensional dataset, but it offers a straightforward and efficient path to gaining insight into high-dimensional data.\n\nPCA's main weakness is that it tends to be highly affected by outliers in the data.\nFor this reason, many robust variants of PCA have been developed, many of which act to iteratively discard data points that are poorly described by the initial components.\nScikit-Learn contains a couple interesting variants on PCA, including ``RandomizedPCA`` and ``SparsePCA``, both also in the ``sklearn.decomposition`` submodule.\n``RandomizedPCA``, which we saw earlier, uses a non-deterministic method to quickly approximate the first few principal components in very high-dimensional data, while ``SparsePCA`` introduces a regularization term (see [In Depth: Linear Regression](05.06-Linear-Regression.ipynb)) that serves to enforce sparsity of the components.\n\nIn the following sections, we will look at other unsupervised learning methods that build on some of the ideas of PCA.", "_____no_output_____" ], [ "<!--NAVIGATION-->\n< [In-Depth: Decision Trees and Random Forests](05.08-Random-Forests.ipynb) | [Contents](Index.ipynb) | [In-Depth: Manifold Learning](05.10-Manifold-Learning.ipynb) >\n\n<a href=\"https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/05.09-Principal-Component-Analysis.ipynb\"><img align=\"left\" src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open in Colab\" title=\"Open and Execute in Google Colaboratory\"></a>\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
4a7bfed72cb8db56d974fbf6641d0fce8407b596
22,406
ipynb
Jupyter Notebook
P1.ipynb
EugeneTolstikhin/udacity-find-lane-lines
2877f5e97b706640e07d2ebb26196bf83cb771cb
[ "MIT" ]
null
null
null
P1.ipynb
EugeneTolstikhin/udacity-find-lane-lines
2877f5e97b706640e07d2ebb26196bf83cb771cb
[ "MIT" ]
null
null
null
P1.ipynb
EugeneTolstikhin/udacity-find-lane-lines
2877f5e97b706640e07d2ebb26196bf83cb771cb
[ "MIT" ]
null
null
null
38.564544
647
0.5985
[ [ [ "# Self-Driving Car Engineer Nanodegree\n\n\n## Project: **Finding Lane Lines on the Road** \n***\nIn this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip \"raw-lines-example.mp4\" (also contained in this repository) to see what the output should look like after using the helper functions below. \n\nOnce you have a result that looks roughly like \"raw-lines-example.mp4\", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video \"P1_example.mp4\". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.\n\nIn addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.\n\n---\nLet's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the \"play\" button above) to display the image.\n\n**Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the \"Kernel\" menu above and selecting \"Restart & Clear Output\".**\n\n---", "_____no_output_____" ], [ "**The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Tranform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**\n\n---\n\n<figure>\n <img src=\"examples/line-segments-example.jpg\" width=\"380\" alt=\"Combined Image\" />\n <figcaption>\n <p></p> \n <p style=\"text-align: center;\"> Your output should look something like this (above) after detecting line segments using the helper functions below </p> \n </figcaption>\n</figure>\n <p></p> \n<figure>\n <img src=\"examples/laneLines_thirdPass.jpg\" width=\"380\" alt=\"Combined Image\" />\n <figcaption>\n <p></p> \n <p style=\"text-align: center;\"> Your goal is to connect/average/extrapolate line segments to get output like this</p> \n </figcaption>\n</figure>", "_____no_output_____" ], [ "**Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. 
Also, consult the forums for more troubleshooting tips.** ", "_____no_output_____" ], [ "## Import Packages", "_____no_output_____" ] ], [ [ "#importing some useful packages\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Read in an Image", "_____no_output_____" ] ], [ [ "#reading in an image\nimage = mpimg.imread('test_images/solidWhiteRight.jpg')\n\n#printing out some stats and plotting\nprint('This image is:', type(image), 'with dimensions:', image.shape)\nplt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')", "_____no_output_____" ] ], [ [ "## Ideas for Lane Detection Pipeline", "_____no_output_____" ], [ "**Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**\n\n`cv2.inRange()` for color selection \n`cv2.fillPoly()` for regions selection \n`cv2.line()` to draw lines on an image given endpoints \n`cv2.addWeighted()` to coadd / overlay two images \n`cv2.cvtColor()` to grayscale or change color \n`cv2.imwrite()` to output images to file \n`cv2.bitwise_and()` to apply a mask to an image\n\n**Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**", "_____no_output_____" ], [ "## Helper Functions", "_____no_output_____" ], [ "Below are some helper functions to help get you started. They should look familiar from the lesson!", "_____no_output_____" ] ], [ [ "import math\n\ndef grayscale(img):\n \"\"\"Applies the Grayscale transform\n This will return an image with only one color channel\n but NOTE: to see the returned image as grayscale\n (assuming your grayscaled image is called 'gray')\n you should call plt.imshow(gray, cmap='gray')\"\"\"\n #return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \ndef canny(img, low_threshold, high_threshold):\n \"\"\"Applies the Canny transform\"\"\"\n return cv2.Canny(img, low_threshold, high_threshold)\n\ndef gaussian_blur(img, kernel_size):\n \"\"\"Applies a Gaussian Noise kernel\"\"\"\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)\n\ndef region_of_interest(img, vertices):\n \"\"\"\n Applies an image mask.\n \n Only keeps the region of the image defined by the polygon\n formed from `vertices`. The rest of the image is set to black.\n `vertices` should be a numpy array of integer points.\n \"\"\"\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image\n\n\ndef draw_lines(img, lines, color=[255, 0, 0], thickness=2):\n \"\"\"\n NOTE: this is the function you might want to use as a starting point once you want to \n average/extrapolate the line segments you detect to map out the full\n extent of the lane (going from the result shown in raw-lines-example.mp4\n to that shown in P1_example.mp4). 
\n \n Think about things like separating line segments by their \n slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left\n line vs. the right line. Then, you can average the position of each of \n the lines and extrapolate to the top and bottom of the lane.\n \n This function draws `lines` with `color` and `thickness`. \n Lines are drawn on the image inplace (mutates the image).\n If you want to make the lines semi-transparent, think about combining\n this function with the weighted_img() function below\n \"\"\"\n for line in lines:\n for x1,y1,x2,y2 in line:\n if x2 != x1:\n theta = np.arctan((y2 - y1) / (x2 - x1)) * 180 / np.pi\n \"\"\"\n theta < 0 - left line\n theta > 0 - right line\n \"\"\"\n if 20. < abs(theta) < 40.:\n cv2.line(img, (x1, y1), (x2, y2), color, thickness)\n\ndef hough_lines(img, ρ, Θ, threshold, min_line_len, max_line_gap):\n \"\"\"\n `img` should be the output of a Canny transform.\n \n Returns an image with hough lines drawn.\n \"\"\"\n lines = cv2.HoughLinesP(img, ρ, Θ, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)\n line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n draw_lines(line_img, lines)\n return line_img\n\n# Python 3 has support for cool math symbols.\n\ndef weighted_img(img, initial_img, α = 0.8, β = 1., γ = 0.):\n \"\"\"\n `img` is the output of the hough_lines(), An image with lines drawn on it.\n Should be a blank image (all black) with lines drawn on it.\n \n `initial_img` should be the image before any processing.\n \n The result image is computed as follows:\n \n initial_img * α + img * β + γ\n NOTE: initial_img and img must be the same shape!\n \"\"\"\n return cv2.addWeighted(initial_img, α, img, β, γ)", "_____no_output_____" ] ], [ [ "## Test Images\n\nBuild your pipeline to work on the images in the directory \"test_images\" \n**You should make sure your pipeline works well on these images before you try the videos.**", "_____no_output_____" ] ], [ [ "import os\nos.listdir(\"test_images/\")", "_____no_output_____" ] ], [ [ "## Build a Lane Finding Pipeline\n\n", "_____no_output_____" ], [ "Build the pipeline and run your solution on all test_images. 
Make copies into the `test_images_output` directory, and you can use the images in your writeup report.\n\nTry tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.", "_____no_output_____" ] ], [ [ "def process_image(image):\n # NOTE: The output you return should be a color image (3 channel) for processing video below\n # TODO: put your pipeline here,\n # you should return the final output (image where lines are drawn on lanes)\n\n gray = grayscale(image)\n\n blur_gray = gaussian_blur(\n img = gray,\n kernel_size = 5)\n\n edges = canny(\n img = blur_gray,\n low_threshold = 100,\n high_threshold = 200)\n\n imshape = image.shape\n\n masked_image = region_of_interest(\n img = edges,\n vertices = np.array([[ (0 , imshape[0] ),\n (imshape[1] / 2, 1.15 * imshape[0] / 2 ),\n (imshape[1] / 2, 1.15 * imshape[0] / 2 ),\n (imshape[1] , imshape[0] )\n ]], dtype=np.int32))\n\n line_img = hough_lines(\n img = masked_image,\n ρ = 1,\n Θ = np.pi/180,\n threshold = 1,\n min_line_len = 20,\n max_line_gap = 10)\n\n lines_edges = weighted_img(\n img = line_img,\n initial_img = image\n )\n\n return lines_edges", "_____no_output_____" ], [ "# TODO: Build your pipeline that will draw lane lines on the test_images\n# then save them to the test_images_output directory.\n\ntest_folder = \"test_images/\"\noutput_folder = \"test_images_output/\"\n\nif not os.path.exists(output_folder):\n os.mkdir(output_folder)\n \n\nfor name in os.listdir(test_folder):\n image = cv2.imread(os.path.join(test_folder, name))\n cv2.imwrite(os.path.join(output_folder, name), process_image(image))\n", "_____no_output_____" ] ], [ [ "## Test on Videos\n\nYou know what's cooler than drawing lanes over images? Drawing lanes over video!\n\nWe can test our solution on two provided videos:\n\n`solidWhiteRight.mp4`\n\n`solidYellowLeft.mp4`\n\n**Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**\n\n**If you get an error that looks like this:**\n```\nNeedDownloadError: Need ffmpeg exe. 
\nYou can download it by calling: \nimageio.plugins.ffmpeg.download()\n```\n**Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**", "_____no_output_____" ] ], [ [ "# Import everything needed to edit/save/watch video clips\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML", "_____no_output_____" ] ], [ [ "Let's try the one with the solid white lane on the right first ...", "_____no_output_____" ] ], [ [ "white_output = 'test_videos_output/solidWhiteRight.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\").subclip(0,5)\nclip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\")\nwhite_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!\n%time white_clip.write_videofile(white_output, audio=False)", "_____no_output_____" ] ], [ [ "Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.", "_____no_output_____" ] ], [ [ "HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(white_output))", "_____no_output_____" ] ], [ [ "## Improve the draw_lines() function\n\n**At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video \"P1_example.mp4\".**\n\n**Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**", "_____no_output_____" ], [ "Now for the one with the solid yellow lane on the left. 
This one's more tricky!", "_____no_output_____" ] ], [ [ "yellow_output = 'test_videos_output/solidYellowLeft.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)\nclip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')\nyellow_clip = clip2.fl_image(process_image)\n%time yellow_clip.write_videofile(yellow_output, audio=False)", "_____no_output_____" ], [ "HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(yellow_output))", "_____no_output_____" ] ], [ [ "## Writeup and Submission\n\nIf you're satisfied with your video outputs, it's time to make the report writeup in a pdf or markdown file. Once you have this Ipython notebook ready along with the writeup, it's time to submit for review! Here is a [link](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) to the writeup template file.\n", "_____no_output_____" ], [ "## Optional Challenge\n\nTry your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!", "_____no_output_____" ] ], [ [ "challenge_output = 'test_videos_output/challenge.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)\nclip3 = VideoFileClip('test_videos/challenge.mp4')\nchallenge_clip = clip3.fl_image(process_image)\n%time challenge_clip.write_videofile(challenge_output, audio=False)", "_____no_output_____" ], [ "HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(challenge_output))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
4a7c010a336f0eae04439e05c6bff0961d3e684e
34,851
ipynb
Jupyter Notebook
misc/brushing.ipynb
rlugojr/lightning-example-notebooks
727a427ce2ec1720e2016b8ad272a1d194f464cf
[ "MIT" ]
62
2015-01-21T03:05:14.000Z
2021-05-02T14:17:57.000Z
misc/brushing.ipynb
rlugojr/lightning-example-notebooks
727a427ce2ec1720e2016b8ad272a1d194f464cf
[ "MIT" ]
4
2015-02-06T22:14:17.000Z
2020-08-07T20:23:24.000Z
misc/brushing.ipynb
rlugojr/lightning-example-notebooks
727a427ce2ec1720e2016b8ad272a1d194f464cf
[ "MIT" ]
40
2015-03-19T03:20:12.000Z
2022-01-11T22:16:50.000Z
97.077994
5,264
0.761298
[ [ [ "# <img style='float: left' src=\"http://lightning-viz.github.io/images/logo.png\"> <br> <br> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Brushing in <a href='http://lightning-viz.github.io/'><font color='#9175f0'>Lightning</font></a>", "_____no_output_____" ], [ "## <hr> Setup", "_____no_output_____" ] ], [ [ "from lightning import Lightning\n\nfrom numpy import random, zeros", "_____no_output_____" ] ], [ [ "## Connect to server", "_____no_output_____" ] ], [ [ "lgn = Lightning(ipython=True, host='http://public.lightning-viz.org')", "_____no_output_____" ] ], [ [ "## <hr> Adding brushing", "_____no_output_____" ], [ "Several visualizations support brushing, including graphs, force networks, and scatter plots. Just set `brush=True`, then SHIFT-click and drag to select points. You should see them highlighted as you drag. We'll also turn off zooming for these examples, which can simplify our interactions with the plot.", "_____no_output_____" ] ], [ [ "x = random.randn(100)\ny = random.randn(100)\n\nlgn.scatter(x, y, brush=True, zoom=False)", "_____no_output_____" ] ], [ [ "## <hr> Getting selections", "_____no_output_____" ], [ "If working with a Lightning server (rather than the server-less mode), we can easily extract the points we've selected. ", "_____no_output_____" ] ], [ [ "x = random.rand(100) * 10\ny = random.rand(100) * 10\n\nviz = lgn.scatter(x, y, brush=True, zoom=False)\nviz", "_____no_output_____" ] ], [ [ "Let's say I use the brush in the visualization above to select all points between 0 and 4. Below, I grab those points, and replot them -- note the new scale.", "_____no_output_____" ] ], [ [ "sx, sy = viz.points()", "_____no_output_____" ], [ "lgn.scatter(sx, sy, zoom=False)", "_____no_output_____" ] ], [ [ "I can use a different accessor, `selected`, to grab the indices of the selected points.", "_____no_output_____" ] ], [ [ "inds = viz.selected()", "_____no_output_____" ] ], [ [ "Let's replot all points, but show the selected ones in a different color.", "_____no_output_____" ] ], [ [ "groups = zeros(100)\ngroups[inds] = 1\n\nlgn.scatter(x, y, group=groups, zoom=False)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a7c03956cdc4976b8f4fa0bda991a6b2838c9de
3,442
ipynb
Jupyter Notebook
static_websites/python/docs/_sources/tutorials/packages/ndarray/03-ndarray-contexts.ipynb
IvyBazan/mxnet.io-v2
fdfd79b1a2c86afb59f27e8700056cd9a32c3181
[ "MIT" ]
null
null
null
static_websites/python/docs/_sources/tutorials/packages/ndarray/03-ndarray-contexts.ipynb
IvyBazan/mxnet.io-v2
fdfd79b1a2c86afb59f27e8700056cd9a32c3181
[ "MIT" ]
null
null
null
static_websites/python/docs/_sources/tutorials/packages/ndarray/03-ndarray-contexts.ipynb
IvyBazan/mxnet.io-v2
fdfd79b1a2c86afb59f27e8700056cd9a32c3181
[ "MIT" ]
null
null
null
26.682171
117
0.563045
[ [ [ "# NDArray Contexts\n\n## Overview\nThis guide will introduce you to managing CPU versus GPU contexts for handling data.\n\nThis content was extracted and simplified from the gluon tutorials in\n[Dive Into Deep Learning](http://gluon.io/).\n\n## Prerequisites\n* [MXNet installed (with GPU support) in a Python environment](../../../install/index.html?language=Python).\n* Python 2.7.x or Python 3.x\n* **One or more GPUs**\n\n\n## Managing Context\n\nIn MXNet, every array has a context.\nOne context could be the CPU. Other contexts might be various GPUs.\nThings can get even hairier when we deploy jobs across multiple servers.\nBy assigning arrays to contexts intelligently, we can minimize\nthe time spent transferring data between devices.\nFor example, when training neural networks on a server with a GPU,\nwe typically prefer for the model's parameters to live on the GPU.\nIf you have a GPU, let's try initializing an array on the first GPU.\nOtherwise, use `ctx=mx.cpu()` in place of `ctx=gpu(0)`.", "_____no_output_____" ] ], [ [ "from mxnet import gpu\nfrom mxnet import nd\nz = nd.ones(shape=(3,3), ctx=gpu(0))\nprint(z)", "_____no_output_____" ] ], [ [ "Given an NDArray on a given context, we can copy it to another context by using\nthe copyto() method. Skip this if you don't have a GPU at the moment.", "_____no_output_____" ] ], [ [ "x_gpu = x.copyto(gpu(0))\nprint(x_gpu)", "_____no_output_____" ] ], [ [ "The result of an operator will have the same context as the inputs.", "_____no_output_____" ] ], [ [ "x_gpu + z", "_____no_output_____" ] ], [ [ "## Watch out!\n\nImagine that your variable z already lives on your second GPU\n(`gpu(0)`). What happens if we call `z.copyto(gpu(0))`? It will make a copy and\nallocate new memory, even though that variable already lives on the desired\ndevice!\n<!-- wouldn't the second GPU be gpu(1)? -->\n\nOften, we only want to make\na copy if the variable currently lives in the wrong context. In these cases, we\ncan call `as_in_context()`. If the variable is already on `gpu(0)` then this is\na no-op.", "_____no_output_____" ] ], [ [ "print('id(z):', id(z))\nz = z.copyto(gpu(0))\nprint('id(z):', id(z))\nz = z.as_in_context(gpu(0))\nprint('id(z):', id(z))\nprint(z)", "_____no_output_____" ] ], [ [ "## Next Up\n\n[Back to NDArray API Guides](.)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a7c0bc56e53730821bce8e7c3048a77eb08eb00
90,341
ipynb
Jupyter Notebook
07_Kaggle_SantanderValuePredictionChallenge/code/01_EDA.ipynb
KartikKannapur/Kaggle
bec2f3f4444956a1d0ce2ee0215be049c543c637
[ "MIT" ]
2
2020-12-15T06:25:07.000Z
2020-12-17T16:18:29.000Z
07_Kaggle_SantanderValuePredictionChallenge/code/01_EDA.ipynb
KartikKannapur/Kaggle
bec2f3f4444956a1d0ce2ee0215be049c543c637
[ "MIT" ]
null
null
null
07_Kaggle_SantanderValuePredictionChallenge/code/01_EDA.ipynb
KartikKannapur/Kaggle
bec2f3f4444956a1d0ce2ee0215be049c543c637
[ "MIT" ]
null
null
null
44.789787
15,368
0.523029
[ [ [ "# Santander Value Prediction Challenge", "_____no_output_____" ], [ "According to Epsilon research, 80% of customers are more likely to do business with you if you provide **personalized service**. Banking is no exception.\n\nThe digitalization of everyday lives means that customers expect services to be delivered in a personalized and timely manner… and often before they´ve even realized they need the service. In their 3rd Kaggle competition, Santander Group aims to go a step beyond recognizing that there is a need to provide a customer a financial service and **intends to determine the amount or value of the customer's transaction**. This means anticipating customer needs in a more concrete, but also simple and personal way. With so many choices for financial services, this need is greater now than ever before.\n\nIn this competition, **Santander Group is asking Kagglers to help them identify the value of transactions for each potential customer**. This is a first step that Santander needs to nail in order to personalize their services at scale.", "_____no_output_____" ], [ "The evaluation metric for this competition is Root Mean Squared Logarithmic Error. **RMSLE**", "_____no_output_____" ], [ "**You are provided with an anonymized dataset containing numeric feature variables, the numeric target column, and a string ID column.**\n\n**The task is to predict the value of target column in the test set**", "_____no_output_____" ], [ "## Load Required Libraries", "_____no_output_____" ] ], [ [ "# #Python Libraries\nimport numpy as np\nimport scipy as sp\nimport pandas as pd\nimport statsmodels\nimport pandas_profiling\n\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport os\nimport sys\nimport time\nimport json\nimport random\nimport requests\nimport datetime\n\nimport missingno as msno\nimport math\nimport sys\nimport gc\nimport os\n\n# #sklearn\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import KFold, StratifiedKFold\nfrom sklearn.ensemble import RandomForestRegressor\n\n# #sklearn - preprocessing\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import LabelEncoder\n\n# #sklearn - metrics\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import r2_score\nfrom sklearn.metrics import roc_auc_score\n\n# #XGBoost & LightGBM\nimport xgboost as xgb\nimport lightgbm as lgb\n\n# #Missing value imputation\nfrom fancyimpute import KNN, MICE\n\n# #Hyperparameter Optimization\nfrom hyperopt.pyll.base import scope\nfrom hyperopt.pyll.stochastic import sample\nfrom hyperopt import STATUS_OK, Trials, fmin, hp, tpe\n\npd.options.display.max_columns = 150", "_____no_output_____" ] ], [ [ "## EDA", "_____no_output_____" ] ], [ [ "!ls ../", "code\ndata\nsubmissions\n" ], [ "!ls ../data", "sample_submission.csv\ntest.csv\ntrain.csv\n" ], [ "df_train = pd.read_csv(\"../data/train.csv\")\ndf_test = pd.read_csv(\"../data/test.csv\")", "_____no_output_____" ], [ "df_train.shape", "_____no_output_____" ], [ "df_test.shape", "_____no_output_____" ], [ "df_train.head()", "_____no_output_____" ] ], [ [ "ID, target, everything else is anonymized", "_____no_output_____" ] ], [ [ "df_train.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 4459 entries, 0 to 4458\nColumns: 4993 
entries, ID to 9fc776466\ndtypes: float64(1845), int64(3147), object(1)\nmemory usage: 169.9+ MB\n" ], [ "df_test.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 49342 entries, 0 to 49341\nColumns: 4992 entries, ID to 9fc776466\ndtypes: float64(4991), object(1)\nmemory usage: 1.8+ GB\n" ] ], [ [ "### Missing Data", "_____no_output_____" ] ], [ [ "df_train.isnull().sum(axis = 0).sum()", "_____no_output_____" ], [ "df_test.isnull().sum(axis = 0).sum()", "_____no_output_____" ] ], [ [ "Yes!! No missing data", "_____no_output_____" ], [ "### Distributions", "_____no_output_____" ] ], [ [ "sns.distplot(df_train['target'])", "_____no_output_____" ], [ "sns.distplot(np.log(1+df_train['target']))", "_____no_output_____" ] ], [ [ "Now, the distribution looks much more normal.", "_____no_output_____" ], [ "### Hypothesis: Do any of the columns have a constant value?\n\nWorth checking, since the dataset is small and the number of rows < the number of columns.", "_____no_output_____" ] ], [ [ "constant_train = df_train.loc[:, (df_train == df_train.iloc[0]).all()].columns.tolist()\nconstant_test = df_test.loc[:, (df_test == df_test.iloc[0]).all()].columns.tolist()", "_____no_output_____" ], [ "len(constant_train)", "_____no_output_____" ], [ "len(constant_test)", "_____no_output_____" ] ], [ [ "There are 256 constant columns in the training dataset, but none in the test dataset. These constant columns are thus most likely an artifact of the way that the train and test sets were constructed. Let's remove them from our train set since they will not add any value.", "_____no_output_____" ] ], [ [ "columns_to_use = df_test.columns.tolist() # #Target variable is not considered\ndel columns_to_use[0] # #Remove 'ID'\ncolumns_to_use = [x for x in columns_to_use if x not in constant_train] # #Remove constant columns\nlen(columns_to_use)", "_____no_output_____" ] ], [ [ "### Measure of sparsity", "_____no_output_____" ] ], [ [ "((df_train[columns_to_use].values.flatten())==0).mean()", "_____no_output_____" ] ], [ [ "97% of values in the train set are zeros, indicating that it is a very sparse matrix.", "_____no_output_____" ], [ "## Modelling", "_____no_output_____" ] ], [ [ "# #Log transform the target variable (equivalent to np.log1p)\ny = np.log(1+df_train.target.values)", "_____no_output_____" ], [ "X = lgb.Dataset(df_train[columns_to_use], y, feature_name = \"auto\")", "_____no_output_____" ] ], [ [ "### Model 1 - LightGBM (My Favourite :P)", "_____no_output_____" ] ], [ [ "params = {'boosting_type': 'gbdt', \n 'objective': 'regression', \n 'metric': 'rmse', \n 'learning_rate': 0.01, \n 'num_leaves': 100, \n 'feature_fraction': 0.4, \n 'bagging_fraction': 0.6, \n 'max_depth': 5, \n 'min_child_weight': 10}", "_____no_output_____" ], [ "clf = lgb.train(params,\n X,\n num_boost_round = 400,\n verbose_eval=True)", "_____no_output_____" ], [ "preds = clf.predict(df_test[columns_to_use])\npreds", "_____no_output_____" ], [ "sample_submission = pd.read_csv(\"../data/sample_submission.csv\")\nsample_submission.target = np.exp(preds)-1\nsample_submission.to_csv('../submissions/model1_lightgbm_01.csv', index=False)\nsample_submission.shape", "_____no_output_____" ] ],
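[ [ "A quick note before cross-validating: the competition metric is RMSLE, and training on the log-transformed target with an `rmse` objective already optimizes the right quantity, because RMSE computed on `log1p` values equals RMSLE on the original scale. The small helper below makes that explicit; it is our own addition for illustration and is not used elsewhere in this notebook.", "_____no_output_____" ] ], [ [ "# #Sanity-check helper (our own addition): RMSE on log1p-transformed values\n# #is exactly RMSLE on the original scale\ndef rmsle(y_true, y_pred):\n    return np.sqrt(np.mean((np.log1p(y_true) - np.log1p(y_pred))**2))", "_____no_output_____" ] ], [ [ "nr_splits = 5\nrandom_state = 1054\n\ny_oof = np.zeros((y.shape[0]))\ntotal_preds = 0\n\n\nkf = KFold(n_splits=nr_splits, shuffle=True, random_state=random_state)\nfor i, (train_index, val_index) in enumerate(kf.split(y)):\n print('Fitting fold', i+1, 'out of', nr_splits)\n X_train, X_val = df_train[columns_to_use].iloc[train_index], df_train[columns_to_use].iloc[val_index]\n y_train, 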
y_val = y[train_index], y[val_index]\n \n train = lgb.Dataset(X_train,y_train ,feature_name = \"auto\")\n val = lgb.Dataset(X_val ,y_val ,feature_name = \"auto\")\n clf = lgb.train(params,train,num_boost_round = 400,verbose_eval=True)\n \n total_preds += clf.predict(df_test[columns_to_use])/nr_splits\n pred_oof = clf.predict(X_val)\n y_oof[val_index] = pred_oof\n print('Fold error', np.sqrt(mean_squared_error(y_val, pred_oof)))\n\nprint('Total error', np.sqrt(mean_squared_error(y, y_oof)))", "Fitting fold 1 out of 5\nFold error 1.4591105635600472\nFitting fold 2 out of 5\nFold error 1.4590817395947246\nFitting fold 3 out of 5\nFold error 1.470471946373302\nFitting fold 4 out of 5\nFold error 1.4985714284380764\nFitting fold 5 out of 5\nFold error 1.5322825994743314\nTotal error 1.484159995484231\n" ], [ "sample_submission.target = np.exp(total_preds)-1\nsample_submission.to_csv('../submissions/model1_lightgbm_02.csv', index=False)\nsample_submission.head()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
4a7c0d57556c23f5bd38a11d44748ea570d2d319
25,595
ipynb
Jupyter Notebook
site/ja/tutorials/keras/save_and_load.ipynb
wakamezake/docs-l10n
5d282ddaf4444058ea12852a06d5ccf19967436e
[ "Apache-2.0" ]
null
null
null
site/ja/tutorials/keras/save_and_load.ipynb
wakamezake/docs-l10n
5d282ddaf4444058ea12852a06d5ccf19967436e
[ "Apache-2.0" ]
null
null
null
site/ja/tutorials/keras/save_and_load.ipynb
wakamezake/docs-l10n
5d282ddaf4444058ea12852a06d5ccf19967436e
[ "Apache-2.0" ]
null
null
null
28.597765
429
0.510412
[ [ [ "##### Copyright 2018 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ], [ "#@title MIT License\n#\n# Copyright (c) 2017 François Chollet\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.", "_____no_output_____" ] ], [ [ "# モデルの保存と復元", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/tutorials/keras/save_and_load\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/tutorials/keras/save_and_load.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/ja/tutorials/keras/save_and_load.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n</table>", "_____no_output_____" ], [ "Note: これらのドキュメントは私たちTensorFlowコミュニティが翻訳したものです。コミュニティによる 翻訳は**ベストエフォート**であるため、この翻訳が正確であることや[英語の公式ドキュメント](https://www.tensorflow.org/?hl=en)の 最新の状態を反映したものであることを保証することはできません。 この翻訳の品質を向上させるためのご意見をお持ちの方は、GitHubリポジトリ[tensorflow/docs](https://github.com/tensorflow/docs)にプルリクエストをお送りください。 コミュニティによる翻訳やレビューに参加していただける方は、 [[email protected] メーリングリスト](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ja)にご連絡ください。", "_____no_output_____" ], [ "モデルは訓練中にも、訓練が終わったあとも保存できます。このことは、長い訓練時間を掛けなくても、やめたところから再開できるということを意味します。モデルが保存可能であることは、あなたが作ったモデルを他の人と共有できるということでもあります。研究結果であるモデルや手法を公開する際、機械学習の実務家はほとんど次のものを共有します。\n\n* モデルを構築するプログラム\n* 学習済みモデルの重みあるいはパラメータ\n\nこのデータを共有することで、他の人がモデルだどの様に動作するかを理解したり、新しいデータに試してみたりすることが容易になります。\n\n注意:信頼できないプログラムには気をつけましょう。TensorFlowのモデルもプログラムです。詳しくは、[Using TensorFlow 
Securely](https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md)を参照してください。\n\n### オプション\n\nTensorFlowのモデルを保存する方法は、使っているAPIによって異なります。このガイドはTensorFlowのモデルを構築し訓練するためのハイレベルなAPIである[tf.keras](https://www.tensorflow.org/guide/keras)を使っています。この他のアプローチについては、TensorFlowの [Save and Restore](https://www.tensorflow.org/guide/saved_model) ガイド、あるいは、[Saving in eager](https://www.tensorflow.org/guide/eager#object-based_saving)を参照してください。", "_____no_output_____" ], [ "## 設定\n\n### インストールとインポート", "_____no_output_____" ], [ "TensorFlowと依存関係のライブラリをインストールし、インポートします。", "_____no_output_____" ] ], [ [ "!pip install h5py pyyaml ", "_____no_output_____" ] ], [ [ "### サンプルデータセットの取得\n\nここでは、モデルを訓練し重みの保存をデモするために、 [MNIST dataset](http://yann.lecun.com/exdb/mnist/) を使います。デモの実行を速くするため、最初の1,000件のサンプルだけを使います。", "_____no_output_____" ] ], [ [ "from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\n\ntry:\n # Colab only\n %tensorflow_version 2.x\nexcept Exception:\n pass\nimport tensorflow as tf\nfrom tensorflow import keras\n\ntf.__version__", "_____no_output_____" ], [ "(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()\n\ntrain_labels = train_labels[:1000]\ntest_labels = test_labels[:1000]\n\ntrain_images = train_images[:1000].reshape(-1, 28 * 28) / 255.0\ntest_images = test_images[:1000].reshape(-1, 28 * 28) / 255.0", "_____no_output_____" ] ], [ [ "### モデルの定義", "_____no_output_____" ], [ "重みの保存と読み込みのデモを行うための簡単なモデルを定義しましょう。", "_____no_output_____" ] ], [ [ "# 短いシーケンシャルモデルを返す関数\ndef create_model():\n model = tf.keras.models.Sequential([\n keras.layers.Dense(512, activation='relu', input_shape=(784,)),\n keras.layers.Dropout(0.2),\n keras.layers.Dense(10, activation='softmax')\n ])\n \n model.compile(optimizer='adam', \n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n \n return model\n\n\n# 基本的なモデルのインスタンスを作る\nmodel = create_model()\nmodel.summary()", "_____no_output_____" ] ], [ [ "## 訓練中にチェックポイントを保存する", "_____no_output_____" ], [ "主な用途は訓練の**途中**あるいは**終了後**にチェックポイントを自動的に保存することです。こうすることにより、再び訓練を行うことなくモデルを使用することができ、また、訓練が中断された場合に、中止したところから再開できます。\n\n`tf.keras.callbacks.ModelCheckpoint`がこれを行うためのコールバックです。このコールバックにはチェックポイントを構成するためのいくつかの引数があります。\n\n### チェックポイントコールバックの使い方\n\nモデルの訓練時に、`ModelCheckpoint`を渡します。", "_____no_output_____" ] ], [ [ "checkpoint_path = \"training_1/cp.ckpt\"\ncheckpoint_dir = os.path.dirname(checkpoint_path)\n\n# チェックポイントコールバックを作る\ncp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, \n save_weights_only=True,\n verbose=1)\n\nmodel = create_model()\n\nmodel.fit(train_images, train_labels, epochs = 10, \n validation_data = (test_images,test_labels),\n callbacks = [cp_callback]) # 訓練にコールバックを渡す\n\n# オプティマイザの状態保存についての警告が表示されるかもしれません。\n# これらの警告は(このノートブックで発生する同様な警告を含めて)\n# 古い用法を非推奨にするためのもので、無視して構いません。", "_____no_output_____" ] ], [ [ "この結果、エポックごとに更新される一連のTensorFlowチェックポイントファイルが作成されます。", "_____no_output_____" ] ], [ [ "!ls {checkpoint_dir}", "_____no_output_____" ] ], [ [ "訓練していない新しいモデルを作ります。重みだけからモデルを復元する場合には、元のモデルと同じアーキテクチャのモデルが必要です。モデルのアーキテクチャが同じであるため、モデルの異なる**インスタンス**であっても重みを共有することができるのです。\n\n訓練していない全く新しいモデルを作り、テストデータセットで評価します。訓練をしていないモデルは偶然のレベル(正解率10%以下)の性能しか無いはずです。", "_____no_output_____" ] ], [ [ "model = create_model()\n\nloss, acc = model.evaluate(test_images, test_labels, verbose=2)\nprint(\"Untrained model, accuracy: {:5.2f}%\".format(100*acc))", "_____no_output_____" ] ], [ [ "次に、チェックポイントから重みをロードし、再び評価します。", "_____no_output_____" ] ], [ [ 
"model.load_weights(checkpoint_path)\nloss,acc = model.evaluate(test_images, test_labels, verbose=2)\nprint(\"Restored model, accuracy: {:5.2f}%\".format(100*acc))", "_____no_output_____" ] ], [ [ "### チェックポイントコールバックのオプション\n\nこのコールバックには、チェックポイントに一意な名前をつけたり、チェックポイントの頻度を調整するためのオプションがあります。\n\n新しいモデルを訓練し、5エポックごとに一意な名前のチェックポイントを保存します。", "_____no_output_____" ] ], [ [ "# ファイル名に(`str.format`を使って)エポック数を埋め込みます\ncheckpoint_path = \"training_2/cp-{epoch:04d}.ckpt\"\ncheckpoint_dir = os.path.dirname(checkpoint_path)\n\ncp_callback = tf.keras.callbacks.ModelCheckpoint(\n checkpoint_path, verbose=1, save_weights_only=True,\n # 重みを5エポックごとに保存します\n period=5)\n\nmodel = create_model()\nmodel.fit(train_images, train_labels,\n epochs = 50, callbacks = [cp_callback],\n validation_data = (test_images,test_labels),\n verbose=0)", "_____no_output_____" ] ], [ [ "次に、出来上がったチェックポイントを確認し、最後のものを選択します。", "_____no_output_____" ] ], [ [ "! ls {checkpoint_dir}", "_____no_output_____" ], [ "latest = tf.train.latest_checkpoint(checkpoint_dir)\nlatest", "_____no_output_____" ] ], [ [ "注意:デフォルトのtensorflowフォーマットは、直近の5つのチェックポイントのみを保存します。\n\nテストのため、モデルをリセットし最後のチェックポイントをロードします。", "_____no_output_____" ] ], [ [ "model = create_model()\nmodel.load_weights(latest)\nloss, acc = model.evaluate(test_images, test_labels, verbose=2)\nprint(\"Restored model, accuracy: {:5.2f}%\".format(100*acc))", "_____no_output_____" ] ], [ [ "## これらのファイルは何?", "_____no_output_____" ], [ "上記のコードでは、重みだけをバイナリで[checkpoint](https://www.tensorflow.org/guide/saved_model#save_and_restore_variables)形式の一連のファイルに保存します。チェックポイントには、次のものが含まれます。\n\n* 1つ以上のモデルの重みの断片\n* どの重みがどの断片に保存されているかを示すインデックスファイル\n\n1台のマシンだけでモデルの訓練を行っている場合には、`.data-00000-of-00001`のようなサフィックスのついたファイルが1つだけ作成されます。", "_____no_output_____" ], [ "## 手動で重みを保存する\n\n上記では重みをモデルにロードする方法を見ました。\n\n手動で重みを保存するのも同じ様に簡単です。`Model.save_weights` メソッドを使います。", "_____no_output_____" ] ], [ [ "# 重みの保存\nmodel.save_weights('./checkpoints/my_checkpoint')\n\n# 重みの復元\nmodel = create_model()\nmodel.load_weights('./checkpoints/my_checkpoint')\n\nloss,acc = model.evaluate(test_images, test_labels, verbose=2)\nprint(\"Restored model, accuracy: {:5.2f}%\".format(100*acc))", "_____no_output_____" ] ], [ [ "## モデル全体の保存\n\nモデルとオプティマイザを、その状態(重みと変数)とモデルの設定の両方を含む1つのファイルに保存することができます。これにより、モデルをオリジナルのPythonコードにアクセスしなくとも使用できるようにエクスポートできます。オプティマイザの状態が復元されるので、中断したところから訓練を再開することも可能です。\n\n完全に機能するモデルを保存できるのは便利です。保存したモデルをTensorFlow.js ([HDF5](https://js.tensorflow.org/tutorials/import-keras.html), [Saved Model](https://js.tensorflow.org/tutorials/import-saved-model.html))でロードし、ブラウザで訓練したり、実行したりすることができるほか、TensorFlow Lite ([HDF5](https://www.tensorflow.org/lite/convert/python_api#exporting_a_tfkeras_file_), [Saved Model](https://www.tensorflow.org/lite/convert/python_api#exporting_a_savedmodel_))\nを使ってモバイルデバイスで実行できるように変換することも可能です。", "_____no_output_____" ], [ "### HDF5ファイルとして\n\nKerasでは、[HDF5](https://en.wikipedia.org/wiki/Hierarchical_Data_Format) 標準を使った基本的なファイルフォーマットが利用できます。ここでの利用目的では、保存されたモデルは単独のバイナリラージオブジェクト(blob)として扱うことができます。", "_____no_output_____" ] ], [ [ "model = create_model()\n\nmodel.fit(train_images, train_labels, epochs=5)\n\n# モデル全体を1つのHDF5ファイルに保存します。\nmodel.save('my_model.h5')", "_____no_output_____" ] ], [ [ "保存したファイルを使ってモデルを再作成します。", "_____no_output_____" ] ], [ [ "# 重みとオプティマイザを含む全く同じモデルを再作成\nnew_model = keras.models.load_model('my_model.h5')\nnew_model.summary()", "_____no_output_____" ] ], [ [ "正解率を検査します。", "_____no_output_____" ] ], [ [ "loss, acc = new_model.evaluate(test_images, test_labels, verbose=2)\nprint(\"Restored model, 
accuracy: {:5.2f}%\".format(100*acc))", "_____no_output_____" ] ], [ [ "この方法では、次のすべてが保存されます。\n\n* 重みの値\n* モデルの設定(アーキテクチャ)\n* オプティマイザの設定\n\nKerasは保存する際にアーキテクチャを調べます。いまのところ、TensorFlowのオプティマイザ(`tf.train`に含まれるもの)を保存することはできません。TensorFlowのオプティマイザを使用している場合には、モデルをロードしたあと再コンパイルする必要があり、オプティマイザの状態は失われます。", "_____no_output_____" ], [ "### `saved_model`として", "_____no_output_____" ], [ "注意:この手法による`tf.keras`モデルの保存は実験的なもので、将来のバージョンで変更される可能性があります。", "_____no_output_____" ], [ "新しいモデルを作ります。", "_____no_output_____" ] ], [ [ "model = create_model()\n\nmodel.fit(train_images, train_labels, epochs=5)", "_____no_output_____" ] ], [ [ "`saved_model`を作成し、タイムスタンプ付きのディレクトリに保存します。", "_____no_output_____" ] ], [ [ "import time\nsaved_model_path = \"./saved_models/{}\".format(int(time.time()))\n\ntf.keras.experimental.export_saved_model(model, saved_model_path)\nsaved_model_path", "_____no_output_____" ] ], [ [ "作成したsaved_modelsを一覧表示します。", "_____no_output_____" ] ], [ [ "!ls saved_models/", "_____no_output_____" ] ], [ [ "保存されたモデル(SavedModel)から新しいKerasモデルをリロードします。", "_____no_output_____" ] ], [ [ "new_model = tf.keras.experimental.load_from_saved_model(saved_model_path)\nnew_model.summary()", "_____no_output_____" ] ], [ [ "復元されたモデルを実行します。", "_____no_output_____" ] ], [ [ "model.predict(test_images).shape", "_____no_output_____" ], [ "# モデルを評価する前にコンパイルする必要があります。\n# モデルをデプロイするだけであればこのステップは不要です。\n\nnew_model.compile(optimizer=model.optimizer, # ロードしてあったオプティマイザを保持\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n# モデルを評価します。\nloss, acc = new_model.evaluate(test_images, test_labels, verbose=2)\nprint(\"Restored model, accuracy: {:5.2f}%\".format(100*acc))", "_____no_output_____" ] ], [ [ "## この先は?\n\n`tf.keras`を使った保存とロードのクイックガイドでした。\n\n* [tf.keras guide](https://www.tensorflow.org/guide/keras) には`tf.keras`での保存とロードについて、もう少し記載されています\n\n* Eager Executionでの保存については[Saving in eager](https://www.tensorflow.org/guide/eager#object_based_saving) を参照ください\n\n* [Save and Restore](https://www.tensorflow.org/guide/saved_model)ガイドには、TensorFlowでの保存についてローレベルの詳細が記載されています", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
4a7c0e90a0304cb0ae2e0501ca0037370896226f
65,701
ipynb
Jupyter Notebook
test/query.ipynb
BhavyaGulati/datamart
7cc346fd1315bafceeea6128d158cf787e3012ab
[ "MIT" ]
null
null
null
test/query.ipynb
BhavyaGulati/datamart
7cc346fd1315bafceeea6128d158cf787e3012ab
[ "MIT" ]
null
null
null
test/query.ipynb
BhavyaGulati/datamart
7cc346fd1315bafceeea6128d158cf787e3012ab
[ "MIT" ]
null
null
null
167.178117
45,944
0.446584
[ [ [ "import sys, random, os, json\nsys.path.append(sys.path.append(os.path.join(os.getcwd(), '..')))\nfrom datamart.augment import Augment\nimport pandas as pd", "_____no_output_____" ], [ "es_index = \"datamart\"\n\naugment = Augment(es_index=es_index)", "_____no_output_____" ] ], [ [ "### Initialize a dataframe", "_____no_output_____" ] ], [ [ "old_df = pd.DataFrame(data={\n 'city': [\"los angeles\", \"New york\", \"Shanghai\", \"SAFDA\", \"manchester\"],\n 'country': [\"US\", \"US\", \"China\", \"fwfb\", \"UK\"],\n})\n\nprint(old_df)", " city country\n0 los angeles US\n1 New york US\n2 Shanghai China\n3 SAFDA fwfb\n4 manchester UK\n" ] ], [ [ "### Search metadata\nQuery by a column, which is query on variable.named_entities, by default, if a metadata matches more than half of cells in original dataframe, it is a hit. Can specify minimum should match with minimum_should_match parameter", "_____no_output_____" ] ], [ [ "hitted_metadatas = augment.query_by_column(\n col=old_df.loc[:, \"city\"], \n minimum_should_match=len(old_df.loc[:, 'city'].unique().tolist())//2)\n\nprint(len(hitted_metadatas))", "136\n" ] ], [ [ "### Query by key value pairs", "_____no_output_____" ] ], [ [ "hitted_metadatas = augment.query_by_key_value_pairs(key_value_pairs=[\n (\"description\", \"average\")\n])\n\nprint(len(hitted_metadatas))", "8\n" ] ], [ [ "### Query by temporal coverage", "_____no_output_____" ] ], [ [ "hitted_metadatas = augment.query_by_temporal_coverage(\n start=\"2018-09-23\", \n end=\"2018-09-30T00:00:00\")\n\nprint(len(hitted_metadatas))", "124\n" ] ], [ [ "With some ranking methods, say we want to augment with a specific metadata, datamart id 1230000", "_____no_output_____" ] ], [ [ "metadata = augment.query_by_datamart_id(datamart_id=1230000)[0]", "_____no_output_____" ], [ "# Take a look at some metadata\nprint(json.dumps(metadata, indent=2))", "{\n \"datamart_id\": 1230000,\n \"title\": \"TAVG\",\n \"description\": \"Average temperature (tenths of degrees C)[Note that TAVG from source 'S' corresponds to an average for the period ending at 2400 UTC rather than local midnight]\",\n \"url\": \"https://www1.ncdc.noaa.gov/pub/data/ghcn/daily/readme.txt\",\n \"keywords\": [\n \"Average Temperature.\"\n ],\n \"provenance\": \"noaa.org\",\n \"materialization\": {\n \"python_path\": \"noaa_materializer\",\n \"arguments\": {\n \"type\": \"TAVG\"\n }\n },\n \"variables\": [\n {\n \"datamart_id\": 1230001,\n \"name\": \"date\",\n \"description\": \"the date of data\",\n \"semantic_type\": [\n \"https://metadata.datadrivendiscovery.org/types/Time\"\n ],\n \"temporal_coverage\": {\n \"start\": \"1874-10-13T00:00:00\",\n \"end\": \"2018-10-01T00:00:00\"\n }\n },\n {\n \"datamart_id\": 1230002,\n \"name\": \"stationId\",\n \"description\": \"the id of station which has this data\",\n \"semantic_type\": [\n \"https://metadata.datadrivendiscovery.org/types/CategoricalData\"\n ]\n },\n {\n \"datamart_id\": 1230003,\n \"name\": \"city\",\n \"description\": \"the city data belongs to\",\n \"semantic_type\": [\n \"https://metadata.datadrivendiscovery.org/types/Location\"\n ],\n \"named_entity\": [\n \"abu dhabi\",\n \"ajman\",\n \"dubai\",\n \"sharjah\",\n \"kabul\",\n \"kandahar\",\n \"algiers\",\n \"annaba\",\n \"batna\",\n \"bechar\",\n \"bejaia\",\n \"constantine\",\n \"guelma\",\n \"laghouat\",\n \"medea\",\n \"mostaganem\",\n \"oran\",\n \"oum el bouaghi\",\n \"saida\",\n \"sidi-bel-abbes\",\n \"skikda\",\n \"tamanrasset\",\n \"tlemcen\",\n \"baku\",\n \"naxcivian\",\n \"durres\",\n \"shkoder\",\n 
\"tirana\",\n \"yerevan\",\n \"luanda\",\n \"lubango\",\n \"namibe\",\n \"bahia blanca\",\n \"buenos aires\",\n \"catamarca\",\n \"comodoro rivadavia\",\n \"cordoba\",\n \"corrientes\",\n \"formosa\",\n \"la plata\",\n \"la rioja\",\n \"mendoza\",\n \"neuquen\",\n \"parana\",\n \"posadas\",\n \"resistencia\",\n \"rio gallegos\",\n \"rosario\",\n \"salta\",\n \"san juan\",\n \"san luis\",\n \"san miguel de tucuman\",\n \"santa rosa\",\n \"santiago del estero\",\n \"ushuaia\",\n \"adelaide\",\n \"brisbane\",\n \"cairns\",\n \"canberra\",\n \"darwin\",\n \"melbourne\",\n \"newcastle\",\n \"perth\",\n \"rockhampton\",\n \"sydney\",\n \"townsville\",\n \"graz\",\n \"innsbruck\",\n \"klagenfurt\",\n \"salzburg\",\n \"vienna\",\n \"al-muharraq\",\n \"manama\",\n \"bridgetown\",\n \"francistown\",\n \"gaborone\",\n \"molepolole\",\n \"brussels\",\n \"gent\",\n \"hasselt\",\n \"liege\",\n \"nassau\",\n \"barisal\",\n \"chittagong\",\n \"dhaka\",\n \"rajshahi\",\n \"belize\",\n \"sarajevo\",\n \"cochabamba\",\n \"la paz\",\n \"oruro\",\n \"potosi\",\n \"santa cruz de la sierra\",\n \"sucre\",\n \"tarija\",\n \"trinidad\",\n \"mandalay\",\n \"myitkyina\",\n \"rangoon\",\n \"sagaing\",\n \"sittwe\",\n \"abomey\",\n \"cotonou\",\n \"lokossa\",\n \"natitingou\",\n \"parakou\",\n \"brest\",\n \"homyel'\",\n \"hrodna\",\n \"mahilyow\",\n \"minsk\",\n \"vitsyebsk\",\n \"honiara\",\n \"aracaju\",\n \"belem\",\n \"belo horizonte\",\n \"boa vista\",\n \"brasilia\",\n \"campo grande\",\n \"cuiaba\",\n \"curitiba\",\n \"florianopolis\",\n \"fortaleza\",\n \"goiania\",\n \"joao pessoa\",\n \"macapa\",\n \"maceio\",\n \"manaus\",\n \"natal\",\n \"niteroi\",\n \"palmas\",\n \"porto alegre\",\n \"porto velho\",\n \"recife\",\n \"rio branco\",\n \"rio de janeiro\",\n \"salvador\",\n \"santarem\",\n \"santos\",\n \"sao luis\",\n \"sao paulo\",\n \"teresina\",\n \"vitoria\",\n \"sofia\",\n \"varna\",\n \"bandar seri begawan\",\n \"bujumbura\",\n \"muyinga\",\n \"calgary\",\n \"edmonton\",\n \"fredericton\",\n \"halifax\",\n \"montreal\",\n \"ottawa\",\n \"quebec\",\n \"regina\",\n \"saint john\",\n \"saskatoon\",\n \"toronto\",\n \"vancouver\",\n \"victoria\",\n \"winnipeg\",\n \"phnom penh\",\n \"abeche\",\n \"moundou\",\n \"ndjamena\",\n \"sarh\",\n \"colombo\",\n \"trincomalee\",\n \"brazzaville\",\n \"loubomo\",\n \"pointe noire\",\n \"bandundu\",\n \"kananga\",\n \"kinshasa\",\n \"lumumbashi\",\n \"matadi\",\n \"mbandaka\",\n \"mbuji-mayi\",\n \"beijing\",\n \"changchun\",\n \"changsha\",\n \"chengdu\",\n \"chongqing\",\n \"dalian\",\n \"fushun\",\n \"fuzhou\",\n \"guangzhou\",\n \"guiyang\",\n \"hangzhou\",\n \"harbin\",\n \"hefei\",\n \"hong kong\",\n \"huhot\",\n \"jinan\",\n \"kashi\",\n \"kunming\",\n \"lanzhou\",\n \"lhasa\",\n \"macau\",\n \"nanchang\",\n \"nanjing\",\n \"nanning\",\n \"qingdao\",\n \"qiqihar\",\n \"shanghai\",\n \"shenyang\",\n \"shijiazhuang\",\n \"taiyuan\",\n \"tianjin\",\n \"urumqi\",\n \"wuhan\",\n \"xi'an\",\n \"xining\",\n \"yinchuan\",\n \"zhengzhou\",\n \"antofagasta\",\n \"concepcion\",\n \"copiapo\",\n \"coquimbo\",\n \"la serena\",\n \"puerto montt\",\n \"punta arenas\",\n \"santiago\",\n \"temuco\",\n \"bertoua\",\n \"douala\",\n \"garoua\",\n \"maroua\",\n \"ngaoundere\",\n \"arauca\",\n \"armenia\",\n \"barranquilla\",\n \"bogota\",\n \"bucaramanga\",\n \"cali\",\n \"cartagena\",\n \"cucuta\",\n \"ibague\",\n \"medellin\",\n \"monteria\",\n \"neiva\",\n \"pasto\",\n \"pereira\",\n \"quibdo\",\n \"riohacha\",\n \"san andres\",\n \"santa marta\",\n \"valledupar\",\n 
\"villavicencio\",\n \"puerto limon\",\n \"san jose\",\n \"bangui\",\n \"berberati\",\n \"camaguey\",\n \"havana\",\n \"matanzas\",\n \"praia\",\n \"lemesos\",\n \"nicosia\",\n \"alborg\",\n \"copenhagen\",\n \"santo domingo\",\n \"babahoyo\",\n \"loja\",\n \"portoviejo\",\n \"quito\",\n \"al ghurdaqah\",\n \"alexandria\",\n \"aswan\",\n \"cairo\",\n \"giza\",\n \"ismailia\",\n \"marsa matruh\",\n \"qena\",\n \"suez\",\n \"cork\",\n \"dublin\",\n \"galway\",\n \"bata\",\n \"malabo\",\n \"tallinn\",\n \"asmara\",\n \"nueva san salvador\",\n \"san salvador\",\n \"sonsonate\",\n \"addis ababa\",\n \"arba minch\",\n \"awasa\",\n \"debre markos\",\n \"dese\",\n \"gonder\",\n \"jima\",\n \"brno\",\n \"ostrava\",\n \"prague\",\n \"usti nad labem\",\n \"cayenne\",\n \"helsinki\",\n \"joensuu\",\n \"jyvaskyla\",\n \"kuopio\",\n \"oulu\",\n \"turku\",\n \"suva\",\n \"ajaccio\",\n \"amiens\",\n \"besancon\",\n \"caen\",\n \"clermont-ferrand\",\n \"dijon\",\n \"le havre\",\n \"lille\",\n \"limoges\",\n \"lyon\",\n \"marseille\",\n \"montpellier\",\n \"nancy\",\n \"nantes\",\n \"orleans\",\n \"paris\",\n \"poitiers\",\n \"reims\",\n \"rennes\",\n \"rouen\",\n \"strasbourg\",\n \"toulouse\",\n \"brikama\",\n \"libreville\",\n \"port gentil\",\n \"bat'umi\",\n \"sokhumi\",\n \"t'bilisi\",\n \"accra\",\n \"ho\",\n \"koforidua\",\n \"kumasi\",\n \"sekondi\",\n \"sunyani\",\n \"tamale\",\n \"wa\",\n \"berlin\",\n \"bonn\",\n \"bremen\",\n \"bremerhaven\",\n \"cologne\",\n \"dortmund\",\n \"dresden\",\n \"duisburg\",\n \"dusseldorf\",\n \"erfurt\",\n \"essen\",\n \"frankfurt\",\n \"hamburg\",\n \"hannover\",\n \"kiel\",\n \"leipzig\",\n \"magdeburg\",\n \"mainz\",\n \"munich\",\n \"potsdam\",\n \"saarbrucken\",\n \"schwerin\",\n \"stuttgart\",\n \"wiesbaden\",\n \"athens\",\n \"ioannina\",\n \"iraklion\",\n \"larisa\",\n \"piraeus\",\n \"thessaloniki\",\n \"guatemala\",\n \"conakry\",\n \"kankan\",\n \"kindia\",\n \"nzerekore\",\n \"georgetown\",\n \"la ceiba\",\n \"san pedro sula\",\n \"tegucigalpa\",\n \"rijeka\",\n \"zagreb\",\n \"budapest\",\n \"debrecen\",\n \"miskolc\",\n \"pecs\",\n \"szeged\",\n \"szombathely\",\n \"reykjavik\",\n \"ambon\",\n \"balikpapan\",\n \"banda aceh\",\n \"bandjermasin\",\n \"bengkulu\",\n \"denpasar\",\n \"jakarta\",\n \"jambi\",\n \"jayapura\",\n \"kupang\",\n \"makassar\",\n \"manado\",\n \"mataram\",\n \"medan\",\n \"padang\",\n \"palembang\",\n \"palu\",\n \"pontianak\",\n \"samarinda\",\n \"semarang\",\n \"surabaja\",\n \"tanjungkarang-telukbetung\",\n \"agartala\",\n \"ahmadabad\",\n \"aizawl\",\n \"amritsar\",\n \"bangalore\",\n \"bhopal\",\n \"bhubaneshwar\",\n \"chandigarh\",\n \"chennai\",\n \"cochin\",\n \"delhi\",\n \"hyderabad\",\n \"imphal\",\n \"jaipur\",\n \"kanpur\",\n \"kohima\",\n \"kolkata\",\n \"lucknow\",\n \"madurai\",\n \"mangalore\",\n \"mumbai\",\n \"nagpur\",\n \"new delhi\",\n \"panaji\",\n \"patna\",\n \"pondicherry\",\n \"port blair\",\n \"pune\",\n \"shillong\",\n \"simla\",\n \"trivandrum\",\n \"varanasi\",\n \"vishakhapatnam\",\n \"ahvaz\",\n \"arak\",\n \"esfahan\",\n \"hamadan\",\n \"ilam\",\n \"kerman\",\n \"kermanshah\",\n \"khorramabad\",\n \"mashhad\",\n \"rasht\",\n \"sanandaj\",\n \"semnan\",\n \"shahr-e kord\",\n \"shiraz\",\n \"tabriz\",\n \"tehran\",\n \"yazd\",\n \"zahedan\",\n \"zanjan\",\n \"beersheba\",\n \"jerusalem\",\n \"ramla\",\n \"tel aviv-yafo\",\n \"bologna\",\n \"cagliari\",\n \"campobasso\",\n \"genoa\",\n \"milan\",\n \"naples\",\n \"palermo\",\n \"rome\",\n \"trento\",\n \"trieste\",\n \"turin\",\n \"abidjan\",\n 
\"bondoukou\",\n \"bouake\",\n \"daloa\",\n \"dimbokro\",\n \"ferkessedougou\",\n \"gagnoa\",\n \"korhogo\",\n \"man\",\n \"yamoussoukro\",\n \"al basrah\",\n \"aomori\",\n \"fukuoka\",\n \"gifu\",\n \"hiroshima\",\n \"kawasaki\",\n \"kita kyushu\",\n \"kobe\",\n \"kyoto\",\n \"nagasaki\",\n \"nagoya\",\n \"naha\",\n \"osaka\",\n \"sapporo\",\n \"sendai\",\n \"shimonoseki\",\n \"tokyo\",\n \"yokohama\",\n \"kingston\",\n \"montego bay\",\n \"spanish town\",\n \"al mafraq\",\n \"az zarqa'\",\n \"irbid\",\n \"mombasa\",\n \"nairobi\",\n \"bishkek\",\n \"karakol\",\n \"osh\",\n \"haeju\",\n \"hyesan\",\n \"kaesong\",\n \"p'yongyang\",\n \"sariwon\",\n \"sinuiju\",\n \"wonsan\",\n \"ch'unch'on\",\n \"ch'ungju\",\n \"cheju\",\n \"chonju\",\n \"inch`on\",\n \"kwangju\",\n \"pusan\",\n \"seoul\",\n \"taegu\",\n \"taejon\",\n \"kuwait\",\n \"aktyubinsk\",\n \"almaty\",\n \"astana\",\n \"atyrau\",\n \"dzhambul\",\n \"karaganda\",\n \"kokshetau\",\n \"kostanay\",\n \"kyzylorda\",\n \"pavlodar\",\n \"petropavlovsk\",\n \"semipalatinsk\",\n \"shymkent\",\n \"taldykorgan\",\n \"ural'sk\",\n \"ust'-kamenogorsk\",\n \"zhezkazgan\",\n \"savannakhet\",\n \"vientiane\",\n \"beirut\",\n \"tripoli\",\n \"zahle\",\n \"riga\",\n \"vilnius\",\n \"monrovia\",\n \"banska bystrica\",\n \"kosice\",\n \"mafeteng\",\n \"maseru\",\n \"luxembourg\",\n \"ajdabiya\",\n \"al khums\",\n \"benghazi\",\n \"darnah\",\n \"misratah\",\n \"antananarivo\",\n \"antsiranana\",\n \"fianarantsoa\",\n \"mahajanga\",\n \"toamasina\",\n \"toliara\",\n \"fort-de-france\",\n \"chisinau\",\n \"mamoutzou\",\n \"ulaanbaatar\",\n \"blantyre\",\n \"lilongwe\",\n \"podgorica\",\n \"skopje\",\n \"bamako\",\n \"gao\",\n \"kayes\",\n \"mopti\",\n \"segou\",\n \"sikasso\",\n \"casablanca\",\n \"marrakech\",\n \"meknes\",\n \"oujda\",\n \"rabat\",\n \"port louis\",\n \"nouadhibou\",\n \"nouakchott\",\n \"muscat\",\n \"acapulco\",\n \"aguascalientes\",\n \"campeche\",\n \"chetumal\",\n \"chihuahua\",\n \"chilpancingo de los bravo\",\n \"ciudad victoria\",\n \"colima\",\n \"cuernavaca\",\n \"culiacan\",\n \"durango\",\n \"guadalajara\",\n \"guanajuato\",\n \"hermosillo\",\n \"jalapa\",\n \"mazatlan\",\n \"merida\",\n \"mexicali\",\n \"mexico\",\n \"monterrey\",\n \"morelia\",\n \"oaxaca\",\n \"pachuca\",\n \"puebla\",\n \"queretaro\",\n \"saltillo\",\n \"san luis potosi\",\n \"tampico\",\n \"tepic\",\n \"tlaxcala\",\n \"toluca\",\n \"tuxtla gutierrez\",\n \"veracruz\",\n \"villahermosa\",\n \"zacatecas\",\n \"johor baharu\",\n \"kota baharu\",\n \"kota kinabalu\",\n \"kuala lumpur\",\n \"kuantan new port\",\n \"kuching\",\n \"melaka\",\n \"pinang\",\n \"shah alam\",\n \"beira\",\n \"chimoio\",\n \"inhambane\",\n \"lichinga\",\n \"maputo\",\n \"nampula\",\n \"pemba\",\n \"quelimane\",\n \"tete\",\n \"xai-xai\",\n \"maradi\",\n \"niamey\",\n \"tahoua\",\n \"zinder\",\n \"enugu\",\n \"ilorin\",\n \"jos\",\n \"kano\",\n \"lagos\",\n \"maiduguri\",\n \"makurdi\",\n \"minna\",\n \"port harcourt\",\n \"yola\",\n \"'s-hertogenbosch\",\n \"amsterdam\",\n \"arnhem\",\n \"assen\",\n \"groningen\",\n \"haarlem\",\n \"leeuwarden\",\n \"maastricht\",\n \"rotterdam\",\n \"the hague\",\n \"utrecht\",\n \"zwolle\",\n \"bergen\",\n \"drammen\",\n \"kristiansand\",\n \"oslo\",\n \"stavanger\",\n \"tromso\",\n \"trondheim\",\n \"bhairawa\",\n \"biratnagar\",\n \"kathmandu\",\n \"willemstad\",\n \"chinandega\",\n \"esteli\",\n \"jinotega\",\n \"juigalpa\",\n \"managua\",\n \"masaya\",\n \"matagalpa\",\n \"auckland\",\n \"christchurch\",\n \"wellington\",\n \"asuncion\",\n 
\"coronel oviedo\",\n \"encarnacion\",\n \"arequipa\",\n \"ayacucho\",\n \"cajamarca\",\n \"callao\",\n \"chiclayo\",\n \"cuzco\",\n \"huanuco\",\n \"huaraz\",\n \"iquitos\",\n \"lima\",\n \"piura\",\n \"pucallpa\",\n \"tacna\",\n \"trujillo\",\n \"tumbes\",\n \"islamabad\",\n \"karachi\",\n \"lahore\",\n \"peshawar\",\n \"quetta\",\n \"rawalpindi\",\n \"bialystok\",\n \"elblag\",\n \"krakow\",\n \"poznan\",\n \"siedlce\",\n \"szczecin\",\n \"warsaw\",\n \"wroclaw\",\n \"colon\",\n \"panama\",\n \"aveiro\",\n \"braga\",\n \"coimbra\",\n \"evora\",\n \"funchal\",\n \"lisbon\",\n \"porto\",\n \"port moresby\",\n \"bissau\",\n \"doha\",\n \"belgrade\",\n \"saint-denis\",\n \"arad\",\n \"bacau\",\n \"baia mare\",\n \"bistrita\",\n \"botosani\",\n \"braila\",\n \"bucharest\",\n \"buzau\",\n \"calarasi\",\n \"cluj-napoca\",\n \"constanta\",\n \"craiova\",\n \"deva\",\n \"drobeta- turmu sererin\",\n \"galati\",\n \"iasi\",\n \"rimnicu vilcea\",\n \"sibiu\",\n \"suceava\",\n \"targu jiu\",\n \"timisoara\",\n \"tulcea\",\n \"manila\",\n \"mayaguez\",\n \"ponce\",\n \"abakan\",\n \"arkangel'sk\",\n \"astrakhan\",\n \"barnaul\",\n \"belgorod\",\n \"birobidzhan\",\n \"blagoveshchensk\",\n \"ceboksary\",\n \"chelyabinsk\",\n \"chita\",\n \"elista\",\n \"gor'kiy\",\n \"gorno-altaysk\",\n \"groznyy\",\n \"irkutsk\",\n \"ivanovo\",\n \"izevsk\",\n \"kaliningrad\",\n \"kaluga\",\n \"kazan'\",\n \"kemerovo\",\n \"khabarovsk\",\n \"khanty-mansiysk\",\n \"klintsy\",\n \"kostroma\",\n \"kotlas\",\n \"krasnodar\",\n \"krasnoyarsk\",\n \"kurgan\",\n \"kursk\",\n \"kuybyskev\",\n \"kyzyl\",\n \"lipetsk\",\n \"machackala\",\n \"magadan\",\n \"majkop\",\n \"moscow\",\n \"murmansk\",\n \"nazran\",\n \"novgorod\",\n \"novokuznetsk\",\n \"novosibirsk\",\n \"omsk\",\n \"orel\",\n \"orenburg\",\n \"penza\",\n \"perm'\",\n \"petropavloski-kamchatskiy\",\n \"petrozavodsk\",\n \"pskov\",\n \"rostov-on-don\",\n \"ryazan\",\n \"saint petersburg\",\n \"saransk\",\n \"saratov\",\n \"smolensk\",\n \"stavropol\",\n \"sverdlovsk\",\n \"syktyvkar\",\n \"tambov\",\n \"tomsk\",\n \"tula\",\n \"tver\",\n \"tyumen\",\n \"ufa\",\n \"ul'yanovsk\",\n \"ulan ude\",\n \"vladikavkaz\",\n \"vladimir\",\n \"vladivostok\",\n \"volgograd\",\n \"vologda\",\n \"vorkuta\",\n \"voronezh\",\n \"vyatka\",\n \"yakutsk\",\n \"yaroslavl\",\n \"yuzhno-sakhalinsk\",\n \"kigali\",\n \"abha\",\n \"jeddah\",\n \"mecca\",\n \"medina\",\n \"riyadh\",\n \"sakakah\",\n \"tabuk\",\n \"bisho\",\n \"bloemfontein\",\n \"cape town\",\n \"durban\",\n \"johannesburg\",\n \"kimberley\",\n \"mmabatho (mafikeng)\",\n \"nelspruit\",\n \"pietermaritzburg (ulundi)\",\n \"pietersburg (polokwane)\",\n \"port elizabeth\",\n \"pretoria\",\n \"richards bay\",\n \"dakar\",\n \"kolda\",\n \"thies\",\n \"ziguinchor\",\n \"ljubljana\",\n \"freetown\",\n \"singapore\",\n \"barcelona\",\n \"bilbao\",\n \"ceuta\",\n \"logrono\",\n \"madrid\",\n \"melilla\",\n \"murcia\",\n \"oviedo\",\n \"palma de mallorca\",\n \"pamplona\",\n \"santa cruz de tenerife\",\n \"santander\",\n \"santiago de compostela\",\n \"seville\",\n \"toledo\",\n \"valencia\",\n \"valladolid\",\n \"zaragoza\",\n \"el fasher\",\n \"el obeid\",\n \"khartoum\",\n \"malakal\",\n \"omdurman\",\n \"port sudan\",\n \"wau\",\n \"gavle\",\n \"goteborg\",\n \"halmstad\",\n \"jonkoping\",\n \"karlstad\",\n \"linkoping\",\n \"malmo\",\n \"orebro\",\n \"stockholm\",\n \"umea\",\n \"uppsala\",\n \"vasteras\",\n \"vaxjo\",\n \"aleppo\",\n \"damascus\",\n \"dayr az zawr\",\n \"hamah\",\n \"homs\",\n \"tartus\",\n \"basel\",\n 
\"geneva\",\n \"saint gallen\",\n \"zurich\",\n \"bangkok\",\n \"chang rai\",\n \"chanthaburi\",\n \"chiang mai\",\n \"chon buri\",\n \"chumphon\",\n \"kanchanaburi\",\n \"khon kaen\",\n \"lampang\",\n \"nakhon ratchasima\",\n \"nakhon si thammarat\",\n \"nong khai\",\n \"phetchabun\",\n \"phitsanulok\",\n \"phuket\",\n \"sakon nakhon\",\n \"samut prakan\",\n \"supham buri\",\n \"surat thani\",\n \"trang\",\n \"ubon ratchathani\",\n \"udon thani\",\n \"uttaradit\",\n \"dushanbe\",\n \"kulob\",\n \"leninobod\",\n \"qurghonteppa\",\n \"lome\",\n \"bizerte\",\n \"gabes\",\n \"gafsa\",\n \"jendouba\",\n \"kairouan\",\n \"sfax\",\n \"tunis\",\n \"adiyaman\",\n \"agri\",\n \"aintab\",\n \"ankara\",\n \"antalya\",\n \"aydin\",\n \"balikesir\",\n \"bingol\",\n \"bolu\",\n \"burdur\",\n \"bursa\",\n \"canakkale\",\n \"cankiri\",\n \"corum\",\n \"denizli\",\n \"diyarbakir\",\n \"edirne\",\n \"elazig\",\n \"erzincan\",\n \"erzurum\",\n \"giresun\",\n \"hakkari\",\n \"isparta\",\n \"istanbul\",\n \"izmir\",\n \"kahramanmaras\",\n \"kars\",\n \"kastamonu\",\n \"kirsehir\",\n \"konya\",\n \"kutahya\",\n \"malatya\",\n \"mersin\",\n \"mus\",\n \"nevsehir\",\n \"nigde\",\n \"rize\",\n \"sakarya\",\n \"samsun\",\n \"siirt\",\n \"sivas\",\n \"tekirdag\",\n \"tokat\",\n \"usak\",\n \"van\",\n \"yozgat\",\n \"zonguldak\",\n \"ashgabat\",\n \"mary\",\n \"turkmenbashi\",\n \"arusha\",\n \"bukoba\",\n \"dar es salaam\",\n \"dodoma\",\n \"iringa\",\n \"moshi\",\n \"mtwara\",\n \"musoma\",\n \"mwanza\",\n \"songea\",\n \"tabora\",\n \"zanzibar\",\n \"arua\",\n \"gulu\",\n \"jinja\",\n \"belfast\",\n \"birmingham\",\n \"dundee\",\n \"edinburgh\",\n \"glasgow\",\n \"leeds\",\n \"liverpool\",\n \"london\",\n \"manchester\",\n \"sheffield\",\n \"southampton\",\n \"cherkasy\",\n \"chernihiv\",\n \"chernivtsi\",\n \"dnipropetrovs'k\",\n \"donets'k\",\n \"ivano-frankivs'k\",\n \"kharkiv\",\n \"kherson\",\n \"khmel'nyts'kyz\",\n \"kirovohrad\",\n \"kovel'\",\n \"kyiv\",\n \"l'viv\",\n \"luhans'k\",\n \"mykolayiv\",\n \"odesa\",\n \"poltava\",\n \"rivne\",\n \"simferopol'\",\n \"sumy\",\n \"ternopil'\",\n \"uzhhorod\",\n \"vinnytsya\",\n \"zaporiyhzhya\",\n \"zhytomyra\",\n \"washington d.c.\",\n \"alexander\",\n \"anniston\",\n \"auburn\",\n \"cullman\",\n \"dothan\",\n \"enterprise\",\n \"eufaula\",\n \"florence\",\n \"fort payne\",\n \"gadsden\",\n \"huntsville\",\n \"jasper\",\n \"mobile\",\n \"montgomery\",\n \"selma\",\n \"talladega\",\n \"troy\",\n \"tuscaloosa\",\n \"anchorage\",\n \"fairbanks\",\n \"juneau\",\n \"nome\",\n \"seward\",\n \"bullhead\",\n \"casa grande\",\n \"douglas\",\n \"flagstaff\",\n \"green valley\",\n \"kingman\",\n \"lake havasu\",\n \"mesa\",\n \"nogales\",\n \"payson\",\n \"phoenix\",\n \"prescott\",\n \"sierra vista\",\n \"tucson\",\n \"yuma\",\n \"arkadelphia\",\n \"blytheville\",\n \"camden\",\n \"conway\",\n \"el dorado\",\n \"fayetteville\",\n \"forrest\",\n \"fort smith\",\n \"harrison\",\n \"hope\",\n \"hot springs\",\n \"jonesboro\",\n \"little rock\",\n \"magnolia\",\n \"mountain home\",\n \"paragould\",\n \"pine bluff\",\n \"russellville\",\n \"searcy\",\n \"siloam springs\",\n \"anaheim\",\n \"bakersfield\",\n \"barstow\",\n \"blythe\",\n \"chico\",\n \"clearlake\",\n \"coalinga\",\n \"el centro\",\n \"eureka\",\n \"fresno\",\n \"grass valley\",\n \"long beach\",\n \"los angeles\",\n \"merced\",\n \"modesto\",\n \"monterey\",\n \"napa\",\n \"oakland\",\n \"oceanside\",\n \"oxnard\",\n \"palm springs\",\n \"red bluff\",\n \"redding\",\n \"ridgecrest\",\n \"riverside\",\n 
\"rosamond\",\n \"sacramento\",\n \"salinas\",\n \"san bernardino\",\n \"san diego\",\n \"san francisco\",\n \"san jose\",\n \"san luis obispo\",\n \"santa ana\",\n \"santa barbara\",\n \"santa clarita\",\n \"santa cruz\",\n \"santa maria\",\n \"santa rosa\",\n \"simi valley\",\n \"soledad\",\n \"stockton\",\n \"susanville\",\n \"ukiah\",\n \"vallejo\",\n \"visalia\",\n \"yuba\",\n \"yucca valley\",\n \"boulder\",\n \"canon\",\n \"colorado springs\",\n \"denver\",\n \"durango\",\n \"fort collins\",\n \"fort morgan\",\n \"grand junction\",\n \"montrose\",\n \"pueblo\",\n \"sterling\",\n \"bridgeport\",\n \"danbury\",\n \"hartford\",\n \"new haven\",\n \"new london\",\n \"norwalk\",\n \"norwich\",\n \"stamford\",\n \"torrington\",\n \"waterbury\",\n \"willimantic\",\n \"dover\",\n \"newark\",\n \"belle glade\",\n \"boca raton\",\n \"boynton beach\",\n \"bradenton\",\n \"cape coral\",\n \"cocoa\",\n \"coral springs\",\n \"crestview\",\n \"daytona beach\",\n \"deltona\",\n \"destin\",\n \"fort lauderdale\",\n \"fort myers\",\n \"fort walton beach\",\n \"gainesville\",\n \"homosassa springs\",\n \"immokalee\",\n \"jacksonville\",\n \"jupiter\",\n \"key largo\",\n \"key west\",\n \"kissimmee\",\n \"marathon\",\n \"melbourne\",\n \"miami\",\n \"naples\",\n \"ocala\",\n \"orlando\",\n \"palatka\",\n \"palm coast\",\n \"panama\",\n \"pensacola\",\n \"pompano beach\",\n \"port charlotte\",\n \"port st. lucie\",\n \"sarasota\",\n \"spring hill\",\n \"st. augustine\",\n \"st. petersburg\",\n \"tallahassee\",\n \"tampa\",\n \"titusville\",\n \"west palm beach\",\n \"albany\",\n \"americus\",\n \"athens\",\n \"atlanta\",\n \"augusta\",\n \"bainbridge\",\n \"brunswick\",\n \"carrollton\",\n \"columbus\",\n \"dalton\",\n \"dublin\",\n \"fort benning south\",\n \"griffin\",\n \"hinesville\",\n \"lagrange\",\n \"macon\",\n \"milledgeville\",\n \"peachtree\",\n \"rome\",\n \"savannah\",\n \"st. 
marys\",\n \"statesboro\",\n \"thomasville\",\n \"tifton\",\n \"valdosta\",\n \"vidalia\",\n \"waycross\",\n \"hilo\",\n \"honolulu\",\n \"kahului\",\n \"boise\",\n \"coeur d'alene\",\n \"idaho falls\",\n \"lewiston\",\n \"moscow\",\n \"nampa\",\n \"pocatello\",\n \"twin falls\",\n \"aurora\",\n \"bloomington\",\n \"carbondale\",\n \"champaign\",\n \"charleston\",\n \"chicago\",\n \"crystal lake\",\n \"danville\",\n \"dekalb\",\n \"decatur\",\n \"dixon\",\n \"effingham\",\n \"elgin\",\n \"freeport\",\n \"galesburg\",\n \"joliet\",\n \"kankakee\",\n \"kewanee\",\n \"lincoln\",\n \"macomb\",\n \"mount vernon\",\n \"naperville\",\n \"ottawa\",\n \"peoria\",\n \"pontiac\",\n \"quincy\",\n \"rockford\",\n \"springfield\",\n \"streator\",\n \"waukegan\",\n \"evansville\",\n \"fort wayne\",\n \"indianapolis\",\n \"kokomo\",\n \"lafayette\",\n \"madison\",\n \"michigan\",\n \"muncie\",\n \"richmond\",\n \"shelbyville\",\n \"south bend\",\n \"terre haute\",\n \"vincennes\",\n \"ames\",\n \"burlington\",\n \"carroll\",\n \"cedar falls\",\n \"cedar rapids\",\n \"clinton\",\n \"davenport\",\n \"des moines\",\n \"dubuque\",\n \"fort dodge\",\n \"iowa\",\n \"keokuk\",\n \"marshalltown\",\n \"mason\",\n \"oskaloosa\",\n \"ottumwa\",\n \"sioux\",\n \"spencer\",\n \"storm lake\",\n \"waterloo\",\n \"coffeyville\",\n \"dodge\",\n \"emporia\",\n \"garden\",\n \"great bend\",\n \"hays\",\n \"hutchinson\",\n \"lawrence\",\n \"leavenworth\",\n \"liberal\",\n \"manhattan\",\n \"pittsburg\",\n \"salina\",\n \"topeka\",\n \"wichita\",\n \"winfield\",\n \"bowling green\",\n \"campbellsville\",\n \"elizabethtown\",\n \"fort knox\",\n \"frankfort\",\n \"hopkinsville\",\n \"lexington\",\n \"louisville\",\n \"madisonville\",\n \"middlesborough\",\n \"murray\",\n \"owensboro\",\n \"paducah\",\n \"somerset\",\n \"alexandria\",\n \"bastrop\",\n \"baton rouge\",\n \"bogalusa\",\n \"fort polk south\",\n \"hammond\",\n \"houma\",\n \"jennings\",\n \"lake charles\",\n \"minden\",\n \"monroe\",\n \"morgan\",\n \"natchitoches\",\n \"new iberia\",\n \"new orleans\",\n \"opelousas\",\n \"ruston\",\n \"shreveport\",\n \"thibodaux\",\n \"bangor\",\n \"portland\",\n \"waterville\",\n \"annapolis\",\n \"baltimore\",\n \"cambridge\",\n \"cumberland\",\n \"easton\",\n \"frederick\",\n \"hagerstown\",\n \"ocean pines\",\n \"salisbury\",\n \"westminster\",\n \"barnstable town\",\n \"boston\",\n \"brockton\",\n \"fall river\",\n \"gloucester\",\n \"greenfield\",\n \"leominster\",\n \"lowell\",\n \"new bedford\",\n \"north adams\",\n \"northampton\",\n \"pittsfield\",\n \"worcester\",\n \"alpena\",\n \"ann arbor\",\n \"benton harbor\",\n \"big rapids\",\n \"cadillac\",\n \"detroit\",\n \"escanaba\",\n \"flint\",\n \"grand rapids\",\n \"holland\",\n \"jackson\",\n \"kalamazoo\",\n \"lansing\",\n \"marquette\",\n \"midland\",\n \"mount pleasant\",\n \"muskegon\",\n \"owosso\",\n \"port huron\",\n \"saginaw\",\n \"sault ste. marie\",\n \"sturgis\",\n \"traverse\",\n \"austin\",\n \"bemidji\",\n \"brainerd\",\n \"buffalo\",\n \"duluth\",\n \"fairmont\",\n \"faribault\",\n \"fergus falls\",\n \"hibbing\",\n \"mankato\",\n \"marshall\",\n \"minneapolis\",\n \"new ulm\",\n \"owatonna\",\n \"rochester\",\n \"saint paul\",\n \"st. 
cloud\",\n \"willmar\",\n \"worthington\",\n \"biloxi\",\n \"clarksdale\",\n \"cleveland\",\n \"corinth\",\n \"greenville\",\n \"greenwood\",\n \"grenada\",\n \"gulfport\",\n \"hattiesburg\",\n \"laurel\",\n \"mccomb\",\n \"meridian\",\n \"natchez\",\n \"oxford\",\n \"picayune\",\n \"tupelo\",\n \"vicksburg\",\n \"yazoo\",\n \"cape girardeau\",\n \"columbia\",\n \"excelsior springs\",\n \"farmington\",\n \"fort leonard wood\",\n \"jefferson\",\n \"joplin\",\n \"kansas\",\n \"kennett\",\n \"kirksville\",\n \"lebanon\",\n \"maryville\",\n \"moberly\",\n \"poplar bluff\",\n \"rolla\",\n \"sedalia\",\n \"sikeston\",\n \"st. joseph\",\n \"st. louis\",\n \"warrensburg\",\n \"washington\",\n \"west plains\",\n \"billings\",\n \"bozeman\",\n \"butte\",\n \"great falls\",\n \"helena\",\n \"kalispell\",\n \"missoula\",\n \"beatrice\",\n \"fremont\",\n \"grand island\",\n \"hastings\",\n \"kearney\",\n \"norfolk\",\n \"north platte\",\n \"omaha\",\n \"scottsbluff\",\n \"boulder\",\n \"carson\",\n \"elko\",\n \"las vegas\",\n \"pahrump\",\n \"reno\",\n \"berlin\",\n \"claremont\",\n \"concord\",\n \"keene\",\n \"laconia\",\n \"nashua\",\n \"portsmouth\",\n \"jersey\",\n \"lakewood\",\n \"pleasantville\",\n \"toms river\",\n \"trenton\",\n \"vineland\",\n \"alamogordo\",\n \"albuquerque\",\n \"carlsbad\",\n \"clovis\",\n \"deming\",\n \"gallup\",\n \"las cruces\",\n \"roswell\",\n \"santa fe\",\n \"silver\",\n \"amsterdam\",\n \"binghamton\",\n \"brentwood\",\n \"brooklyn\",\n \"commack\",\n \"coram\",\n \"elmira\",\n \"hempstead\",\n \"huntington station\",\n \"ithaca\",\n \"jamestown\",\n \"kingston\",\n \"levittown\",\n \"massena\",\n \"middletown\",\n \"new\",\n \"new york\",\n \"niagara falls\",\n \"ogdensburg\",\n \"oneonta\",\n \"oswego\",\n \"plattsburgh\",\n \"poughkeepsie\",\n \"saratoga springs\",\n \"syracuse\",\n \"utica\",\n \"watertown\",\n \"yonkers\",\n \"asheboro\",\n \"asheville\",\n \"boone\",\n \"charlotte\",\n \"durham\",\n \"elizabeth\",\n \"fort bragg\",\n \"gastonia\",\n \"greensboro\",\n \"hendersonville\",\n \"hickory\",\n \"new bern\",\n \"raleigh\",\n \"roanoke rapids\",\n \"rocky mount\",\n \"statesville\",\n \"wilmington\",\n \"winston-salem\",\n \"bismarck\",\n \"dickinson\",\n \"fargo\",\n \"grand forks\",\n \"minot\",\n \"williston\",\n \"akron\",\n \"ashland\",\n \"ashtabula\",\n \"canton\",\n \"chillicothe\",\n \"cincinnati\",\n \"dayton\",\n \"defiance\",\n \"delaware\",\n \"lima\",\n \"mansfield\",\n \"marion\",\n \"mentor\",\n \"new philadelphia\",\n \"salem\",\n \"sandusky\",\n \"steubenville\",\n \"toledo\",\n \"urbana\",\n \"wooster\",\n \"youngstown\",\n \"zanesville\",\n \"ada\",\n \"altus\",\n \"ardmore\",\n \"bartlesville\",\n \"chickasha\",\n \"durant\",\n \"elk\",\n \"enid\",\n \"guymon\",\n \"lawton\",\n \"mcalester\",\n \"muskogee\",\n \"oklahoma\",\n \"okmulgee\",\n \"ponca\",\n \"shawnee\",\n \"stillwater\",\n \"tahlequah\",\n \"tulsa\",\n \"woodward\",\n \"bend\",\n \"coos bay\",\n \"corvallis\",\n \"eugene\",\n \"grants pass\",\n \"hermiston\",\n \"hillsboro\",\n \"klamath falls\",\n \"la grande\",\n \"medford\",\n \"ontario\",\n \"pendleton\",\n \"roseburg\",\n \"st. 
helens\",\n \"allentown\",\n \"altoona\",\n \"bethlehem\",\n \"bloomsburg\",\n \"chambersburg\",\n \"chester\",\n \"erie\",\n \"hanover\",\n \"harrisburg\",\n \"hazleton\",\n \"johnstown\",\n \"lancaster\",\n \"meadville\",\n \"philadelphia\",\n \"pittsburgh\",\n \"reading\",\n \"scranton\",\n \"state college\",\n \"uniontown\",\n \"wilkes-barre\",\n \"williamsport\",\n \"york\",\n \"newport\",\n \"providence\",\n \"westerly\",\n \"clemson\",\n \"hilton head island\",\n \"myrtle beach\",\n \"orangeburg\",\n \"rock hill\",\n \"spartanburg\",\n \"sumter\",\n \"aberdeen\",\n \"brookings\",\n \"huron\",\n \"mitchell\",\n \"pierre\",\n \"rapid\",\n \"sioux falls\",\n \"yankton\",\n \"bristol\",\n \"chattanooga\",\n \"clarksville\",\n \"cookeville\",\n \"dyersburg\",\n \"johnson\",\n \"kingsport\",\n \"knoxville\",\n \"mcminnville\",\n \"memphis\",\n \"morristown\",\n \"murfreesboro\",\n \"nashville\",\n \"union\",\n \"abilene\",\n \"amarillo\",\n \"arlington\",\n \"bay\",\n \"beaumont\",\n \"beeville\",\n \"brenham\",\n \"brownsville\",\n \"bryan\",\n \"college station\",\n \"conroe\",\n \"corpus christi\",\n \"corsicana\",\n \"dallas\",\n \"del rio\",\n \"denton\",\n \"eagle pass\",\n \"el campo\",\n \"el paso\",\n \"fort hood\",\n \"fort worth\",\n \"galveston\",\n \"gatesville\",\n \"harlingen\",\n \"henderson\",\n \"houston\",\n \"irving\",\n \"katy\",\n \"kerrville\",\n \"killeen\",\n \"kingsville\",\n \"lake jackson\",\n \"laredo\",\n \"longview\",\n \"lubbock\",\n \"lufkin\",\n \"mcallen\",\n \"mineral wells\",\n \"nacogdoches\",\n \"new braunfels\",\n \"odessa\",\n \"palestine\",\n \"paris\",\n \"plano\",\n \"port arthur\",\n \"rio grande\",\n \"round rock\",\n \"san antonio\",\n \"san marcos\",\n \"sherman\",\n \"stephenville\",\n \"sulphur springs\",\n \"texarkana\",\n \"the woodlands\",\n \"tyler\",\n \"uvalde\",\n \"vernon\",\n \"victoria\",\n \"waco\",\n \"waxahachie\",\n \"wichita falls\",\n \"brigham\",\n \"cedar\",\n \"logan\",\n \"ogden\",\n \"provo\",\n \"salt lake\",\n \"st. 
george\",\n \"montpelier\",\n \"rutland\",\n \"blacksburg\",\n \"centreville\",\n \"charlottesville\",\n \"chesapeake\",\n \"fredericksburg\",\n \"hampton\",\n \"harrisonburg\",\n \"leesburg\",\n \"lynchburg\",\n \"newport news\",\n \"roanoke\",\n \"virginia beach\",\n \"winchester\",\n \"anacortes\",\n \"bellevue\",\n \"bellingham\",\n \"bremerton\",\n \"centralia\",\n \"ellensburg\",\n \"everett\",\n \"federal way\",\n \"kennewick\",\n \"moses lake\",\n \"oak harbor\",\n \"olympia\",\n \"port angeles\",\n \"pullman\",\n \"redmond\",\n \"seattle\",\n \"spokane\",\n \"sunnyside\",\n \"tacoma\",\n \"vancouver\",\n \"walla walla\",\n \"wenatchee\",\n \"yakima\",\n \"beckley\",\n \"bluefield\",\n \"huntington\",\n \"martinsburg\",\n \"morgantown\",\n \"parkersburg\",\n \"wheeling\",\n \"eau claire\",\n \"green bay\",\n \"janesville\",\n \"kenosha\",\n \"la crosse\",\n \"manitowoc\",\n \"marinette\",\n \"milwaukee\",\n \"oshkosh\",\n \"racine\",\n \"river falls\",\n \"sheboygan\",\n \"wausau\",\n \"casper\",\n \"cheyenne\",\n \"evanston\",\n \"gillette\",\n \"laramie\",\n \"rock springs\",\n \"sheridan\",\n \"bobo dioulasso\",\n \"ouagadougou\",\n \"ouahigouya\",\n \"melo\",\n \"montevideo\",\n \"paysandu\",\n \"rivera\",\n \"salto\",\n \"tacuarembo\",\n \"andizhan\",\n \"bukhara\",\n \"dzhizak\",\n \"fergana\",\n \"gulistan\",\n \"karshi\",\n \"namangan\",\n \"navoi\",\n \"nukus\",\n \"samarkand\",\n \"tashkent\",\n \"termez\",\n \"urgench\",\n \"barcelona\",\n \"barquisimeto\",\n \"caracas\",\n \"ciudad bolivar\",\n \"coro\",\n \"cumana\",\n \"guanare\",\n \"maracaibo\",\n \"maturin\",\n \"merida\",\n \"puerto ayacucho\",\n \"puerto la cruz\",\n \"san carlos\",\n \"san cristobal\",\n \"san felipe\",\n \"san juan de los morros\",\n \"valencia\",\n \"bien hoa\",\n \"buon me thuot\",\n \"can tho\",\n \"da lat\",\n \"da nang\",\n \"haiphong\",\n \"hanoi\",\n \"ho chi minh\",\n \"hue\",\n \"my tho\",\n \"nha trang\",\n \"phan thiet\",\n \"play cu\",\n \"qui nhon\",\n \"soc trang\",\n \"tan an\",\n \"thanh hoa\",\n \"truc giang\",\n \"tuy hoa\",\n \"vinh long\",\n \"walvis bay\",\n \"windhoek\",\n \"manzini\",\n \"mbabane\",\n \"chipata\",\n \"kabwe\",\n \"kasama\",\n \"livingstone\",\n \"lusaka\",\n \"mongu\",\n \"ndola\",\n \"bulawayo\",\n \"gweru\",\n \"harare\",\n \"masvingo\"\n ]\n },\n {\n \"datamart_id\": 1230004,\n \"name\": \"TAVG\",\n \"description\": \"Average temperature (tenths of degrees C)[Note that TAVG from source 'S' corresponds to an average for the period ending at 2400 UTC rather than local midnight]\",\n \"semantic_type\": [\n \"http://schema.org/Float\"\n ]\n }\n ]\n}\n" ] ], [ [ "### Materialize dataset with constrains", "_____no_output_____" ] ], [ [ "# Get me subset of the dataset only related to my cities in old_df and time range from 2018-09-23 to 2018-09-30\nnew_df = augment.get_dataset(metadata=metadata, variables=None, constrains={\n \"locations\": old_df.loc[:, 'city'].unique().tolist(),\n \"date_range\": {\n \"start\": \"2018-09-23T00:00:00\",\n \"end\": \"2018-09-30T00:00:00\"\n }\n })", "_____no_output_____" ], [ "print(new_df.iloc[random.sample(range(1, new_df.shape[0]), 10), :])", " date stationid city TAVG\n106 2018-09-23T00:00:00 GHCND:USW00094789 New york 173\n14 2018-09-24T00:00:00 GHCND:USR0000CBEV los angeles 186\n22 2018-09-24T00:00:00 GHCND:USR0000CSFD los angeles 204\n39 2018-09-26T00:00:00 GHCND:USR0000CACT los angeles 248\n134 2018-09-26T00:00:00 GHCND:CHM00058367 Shanghai 235\n25 2018-09-24T00:00:00 GHCND:USW00023174 los angeles 196\n40 
2018-09-26T00:00:00 GHCND:USR0000CBEV los angeles 204\n8 2018-09-23T00:00:00 GHCND:USR0000CMIL los angeles 212\n101 2018-09-30T00:00:00 GHCND:USR0000CWHH los angeles 224\n1 2018-09-23T00:00:00 GHCND:USR0000CBEV los angeles 206\n" ] ], [ [ "### Join\nThere are many ways to join the original dataframe with the new dataframe.\nThe simplest solution is an outer join, which will produce several rows for the same city.", "_____no_output_____" ] ], [ [ "df = pd.merge(left=old_df, right=new_df, left_on='city', right_on='city', how='outer')\nprint(df)", " city country date stationid TAVG\n0 los angeles US 2018-09-23T00:00:00 GHCND:USR0000CACT 233.0\n1 los angeles US 2018-09-23T00:00:00 GHCND:USR0000CBEV 206.0\n2 los angeles US 2018-09-23T00:00:00 GHCND:USR0000CCHB 228.0\n3 los angeles US 2018-09-23T00:00:00 GHCND:USR0000CCHI 218.0\n4 los angeles US 2018-09-23T00:00:00 GHCND:USR0000CCLE 237.0\n5 los angeles US 2018-09-23T00:00:00 GHCND:USR0000CCP9 224.0\n6 los angeles US 2018-09-23T00:00:00 GHCND:USR0000CLTU 215.0\n7 los angeles US 2018-09-23T00:00:00 GHCND:USR0000CMAL 197.0\n8 los angeles US 2018-09-23T00:00:00 GHCND:USR0000CMIL 212.0\n9 los angeles US 2018-09-23T00:00:00 GHCND:USR0000CSFD 222.0\n10 los angeles US 2018-09-23T00:00:00 GHCND:USR0000CWHH 206.0\n11 los angeles US 2018-09-23T00:00:00 GHCND:USW00023129 214.0\n12 los angeles US 2018-09-23T00:00:00 GHCND:USW00023174 201.0\n13 los angeles US 2018-09-24T00:00:00 GHCND:USR0000CACT 206.0\n14 los angeles US 2018-09-24T00:00:00 GHCND:USR0000CBEV 186.0\n15 los angeles US 2018-09-24T00:00:00 GHCND:USR0000CCHB 185.0\n16 los angeles US 2018-09-24T00:00:00 GHCND:USR0000CCHI 208.0\n17 los angeles US 2018-09-24T00:00:00 GHCND:USR0000CCLE 217.0\n18 los angeles US 2018-09-24T00:00:00 GHCND:USR0000CCP9 207.0\n19 los angeles US 2018-09-24T00:00:00 GHCND:USR0000CLTU 181.0\n20 los angeles US 2018-09-24T00:00:00 GHCND:USR0000CMAL 163.0\n21 los angeles US 2018-09-24T00:00:00 GHCND:USR0000CMIL 204.0\n22 los angeles US 2018-09-24T00:00:00 GHCND:USR0000CSFD 204.0\n23 los angeles US 2018-09-24T00:00:00 GHCND:USR0000CWHH 187.0\n24 los angeles US 2018-09-24T00:00:00 GHCND:USW00023129 203.0\n25 los angeles US 2018-09-24T00:00:00 GHCND:USW00023174 196.0\n26 los angeles US 2018-09-25T00:00:00 GHCND:USR0000CACT 224.0\n27 los angeles US 2018-09-25T00:00:00 GHCND:USR0000CBEV 176.0\n28 los angeles US 2018-09-25T00:00:00 GHCND:USR0000CCHB 182.0\n29 los angeles US 2018-09-25T00:00:00 GHCND:USR0000CCHI 229.0\n.. ... ... ... ... 
...\n122 New york US 2018-09-29T00:00:00 GHCND:USW00014732 189.0\n123 New york US 2018-09-29T00:00:00 GHCND:USW00014734 176.0\n124 New york US 2018-09-29T00:00:00 GHCND:USW00094789 181.0\n125 New york US 2018-09-30T00:00:00 GHCND:USW00014732 184.0\n126 New york US 2018-09-30T00:00:00 GHCND:USW00014734 167.0\n127 New york US 2018-09-30T00:00:00 GHCND:USW00094789 172.0\n128 Shanghai China 2018-09-23T00:00:00 GHCND:CHM00058362 253.0\n129 Shanghai China 2018-09-23T00:00:00 GHCND:CHM00058367 256.0\n130 Shanghai China 2018-09-24T00:00:00 GHCND:CHM00058367 210.0\n131 Shanghai China 2018-09-25T00:00:00 GHCND:CHM00058362 232.0\n132 Shanghai China 2018-09-25T00:00:00 GHCND:CHM00058367 234.0\n133 Shanghai China 2018-09-26T00:00:00 GHCND:CHM00058362 234.0\n134 Shanghai China 2018-09-26T00:00:00 GHCND:CHM00058367 235.0\n135 Shanghai China 2018-09-27T00:00:00 GHCND:CHM00058362 234.0\n136 Shanghai China 2018-09-27T00:00:00 GHCND:CHM00058367 238.0\n137 Shanghai China 2018-09-28T00:00:00 GHCND:CHM00058362 239.0\n138 Shanghai China 2018-09-28T00:00:00 GHCND:CHM00058367 239.0\n139 Shanghai China 2018-09-29T00:00:00 GHCND:CHM00058362 234.0\n140 Shanghai China 2018-09-29T00:00:00 GHCND:CHM00058367 231.0\n141 Shanghai China 2018-09-30T00:00:00 GHCND:CHM00058362 228.0\n142 Shanghai China 2018-09-30T00:00:00 GHCND:CHM00058367 236.0\n143 SAFDA fwfb NaN NaN NaN\n144 manchester UK 2018-09-23T00:00:00 GHCND:USW00014745 113.0\n145 manchester UK 2018-09-24T00:00:00 GHCND:USW00014745 103.0\n146 manchester UK 2018-09-25T00:00:00 GHCND:USW00014745 92.0\n147 manchester UK 2018-09-26T00:00:00 GHCND:USW00014745 198.0\n148 manchester UK 2018-09-27T00:00:00 GHCND:USW00014745 187.0\n149 manchester UK 2018-09-28T00:00:00 GHCND:USW00014745 144.0\n150 manchester UK 2018-09-29T00:00:00 GHCND:USW00014745 128.0\n151 manchester UK 2018-09-30T00:00:00 GHCND:USW00014745 107.0\n\n[152 rows x 5 columns]\n" ] ], [ [ "#### Aggregation\nThe join can also be performed on aggregated data.", "_____no_output_____" ] ], [ [ "# Aggregate on city\nnew_df_aggregated = new_df.groupby([\"city\"], as_index=False)[\"TAVG\"].mean()\nprint(new_df_aggregated)", " city TAVG\n0 New york 188.416667\n1 Shanghai 235.533333\n2 los angeles 212.057692\n3 manchester 134.000000\n" ], [ "df = pd.merge(left=old_df, right=new_df_aggregated, left_on='city', right_on='city', how='outer')\nprint(df)", " city country TAVG\n0 los angeles US 212.057692\n1 New york US 188.416667\n2 Shanghai China 235.533333\n3 SAFDA fwfb NaN\n4 manchester UK 134.000000\n" ], [ "# Aggregate on city and date\nnew_df_aggregated = new_df.groupby([\"city\", \"date\"], as_index=False)[\"TAVG\"].mean()\nprint(new_df_aggregated)", " city date TAVG\n0 New york 2018-09-23T00:00:00 174.000000\n1 New york 2018-09-24T00:00:00 176.333333\n2 New york 2018-09-25T00:00:00 184.666667\n3 New york 2018-09-26T00:00:00 239.333333\n4 New york 2018-09-27T00:00:00 206.666667\n5 New york 2018-09-28T00:00:00 170.000000\n6 New york 2018-09-29T00:00:00 182.000000\n7 New york 2018-09-30T00:00:00 174.333333\n8 Shanghai 2018-09-23T00:00:00 254.500000\n9 Shanghai 2018-09-24T00:00:00 210.000000\n10 Shanghai 2018-09-25T00:00:00 233.000000\n11 Shanghai 2018-09-26T00:00:00 234.500000\n12 Shanghai 2018-09-27T00:00:00 236.000000\n13 Shanghai 2018-09-28T00:00:00 239.000000\n14 Shanghai 2018-09-29T00:00:00 232.500000\n15 Shanghai 2018-09-30T00:00:00 232.000000\n16 los angeles 2018-09-23T00:00:00 216.384615\n17 los angeles 2018-09-24T00:00:00 195.923077\n18 los angeles 2018-09-25T00:00:00 202.923077\n19 los angeles 2018-09-26T00:00:00 
227.538462\n20 los angeles 2018-09-27T00:00:00 238.692308\n21 los angeles 2018-09-28T00:00:00 212.615385\n22 los angeles 2018-09-29T00:00:00 192.692308\n23 los angeles 2018-09-30T00:00:00 209.692308\n24 manchester 2018-09-23T00:00:00 113.000000\n25 manchester 2018-09-24T00:00:00 103.000000\n26 manchester 2018-09-25T00:00:00 92.000000\n27 manchester 2018-09-26T00:00:00 198.000000\n28 manchester 2018-09-27T00:00:00 187.000000\n29 manchester 2018-09-28T00:00:00 144.000000\n30 manchester 2018-09-29T00:00:00 128.000000\n31 manchester 2018-09-30T00:00:00 107.000000\n" ], [ "df = pd.merge(left=old_df, right=new_df_aggregated, left_on='city', right_on='city', how='outer')\nprint(df)", " city country date TAVG\n0 los angeles US 2018-09-23T00:00:00 216.384615\n1 los angeles US 2018-09-24T00:00:00 195.923077\n2 los angeles US 2018-09-25T00:00:00 202.923077\n3 los angeles US 2018-09-26T00:00:00 227.538462\n4 los angeles US 2018-09-27T00:00:00 238.692308\n5 los angeles US 2018-09-28T00:00:00 212.615385\n6 los angeles US 2018-09-29T00:00:00 192.692308\n7 los angeles US 2018-09-30T00:00:00 209.692308\n8 New york US 2018-09-23T00:00:00 174.000000\n9 New york US 2018-09-24T00:00:00 176.333333\n10 New york US 2018-09-25T00:00:00 184.666667\n11 New york US 2018-09-26T00:00:00 239.333333\n12 New york US 2018-09-27T00:00:00 206.666667\n13 New york US 2018-09-28T00:00:00 170.000000\n14 New york US 2018-09-29T00:00:00 182.000000\n15 New york US 2018-09-30T00:00:00 174.333333\n16 Shanghai China 2018-09-23T00:00:00 254.500000\n17 Shanghai China 2018-09-24T00:00:00 210.000000\n18 Shanghai China 2018-09-25T00:00:00 233.000000\n19 Shanghai China 2018-09-26T00:00:00 234.500000\n20 Shanghai China 2018-09-27T00:00:00 236.000000\n21 Shanghai China 2018-09-28T00:00:00 239.000000\n22 Shanghai China 2018-09-29T00:00:00 232.500000\n23 Shanghai China 2018-09-30T00:00:00 232.000000\n24 SAFDA fwfb NaN NaN\n25 manchester UK 2018-09-23T00:00:00 113.000000\n26 manchester UK 2018-09-24T00:00:00 103.000000\n27 manchester UK 2018-09-25T00:00:00 92.000000\n28 manchester UK 2018-09-26T00:00:00 198.000000\n29 manchester UK 2018-09-27T00:00:00 187.000000\n30 manchester UK 2018-09-28T00:00:00 144.000000\n31 manchester UK 2018-09-29T00:00:00 128.000000\n32 manchester UK 2018-09-30T00:00:00 107.000000\n" ] ], [ [ "We can also unstack the new dataframe to form more columns, so that we do not produce extra rows", "_____no_output_____" ] ], [ [ "new_df_unstacked = new_df.groupby([\"city\", \"date\"])[\"TAVG\"].mean().unstack().reset_index(level=['city'])\nprint(new_df_unstacked)", "date city 2018-09-23T00:00:00 2018-09-24T00:00:00 \\\n0 New york 174.000000 176.333333 \n1 Shanghai 254.500000 210.000000 \n2 los angeles 216.384615 195.923077 \n3 manchester 113.000000 103.000000 \n\ndate 2018-09-25T00:00:00 2018-09-26T00:00:00 2018-09-27T00:00:00 \\\n0 184.666667 239.333333 206.666667 \n1 233.000000 234.500000 236.000000 \n2 202.923077 227.538462 238.692308 \n3 92.000000 198.000000 187.000000 \n\ndate 2018-09-28T00:00:00 2018-09-29T00:00:00 2018-09-30T00:00:00 \n0 170.000000 182.000000 174.333333 \n1 239.000000 232.500000 232.000000 \n2 212.615385 192.692308 209.692308 \n3 144.000000 128.000000 107.000000 \n" ], [ "df = pd.merge(left=old_df, right=new_df_unstacked, left_on='city', right_on='city', how='outer')\nprint(df)", " city country 2018-09-23T00:00:00 2018-09-24T00:00:00 \\\n0 los angeles US 216.384615 195.923077 \n1 New york US 174.000000 176.333333 \n2 Shanghai China 254.500000 210.000000 \n3 SAFDA fwfb NaN NaN \n4 manchester UK 
113.000000 103.000000 \n\n 2018-09-25T00:00:00 2018-09-26T00:00:00 2018-09-27T00:00:00 \\\n0 202.923077 227.538462 238.692308 \n1 184.666667 239.333333 206.666667 \n2 233.000000 234.500000 236.000000 \n3 NaN NaN NaN \n4 92.000000 198.000000 187.000000 \n\n 2018-09-28T00:00:00 2018-09-29T00:00:00 2018-09-30T00:00:00 \n0 212.615385 192.692308 209.692308 \n1 170.000000 182.000000 174.333333 \n2 239.000000 232.500000 232.000000 \n3 NaN NaN NaN \n4 144.000000 128.000000 107.000000 \n" ] ] ]
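[ [ "A hedged aside, not part of the original analysis: the groupby + unstack + merge pattern above can be collapsed into a single pivot_table call. A minimal sketch, assuming the old_df and new_df variables with the 'city', 'date' and 'TAVG' columns shown above; the name new_df_wide is ours.", "_____no_output_____" ] ], [ [ "# Equivalent reshape with pivot_table: one row per city, one column per date.\n# aggfunc='mean' mirrors the groupby(...).mean() aggregation used above.\nnew_df_wide = new_df.pivot_table(index='city', columns='date', values='TAVG', aggfunc='mean').reset_index()\n# A left join keeps every row of old_df, including cities with no weather match.\ndf_wide = pd.merge(left=old_df, right=new_df_wide, on='city', how='left')\nprint(df_wide)", "_____no_output_____" ] ]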
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
4a7c1419d9f9f9b93a396a8f66ecb17a42f7dd8d
27,490
ipynb
Jupyter Notebook
_notebooks/2022-01-01-intro.ipynb
rhkrehtjd/INTROml
784b7c4037e3810158b515f2d1893eb78387c20f
[ "Apache-2.0" ]
null
null
null
_notebooks/2022-01-01-intro.ipynb
rhkrehtjd/INTROml
784b7c4037e3810158b515f2d1893eb78387c20f
[ "Apache-2.0" ]
null
null
null
_notebooks/2022-01-01-intro.ipynb
rhkrehtjd/INTROml
784b7c4037e3810158b515f2d1893eb78387c20f
[ "Apache-2.0" ]
null
null
null
29.97819
273
0.506657
[ [ [ "# 2022/01/01/SAT(HappyNewYear)", "_____no_output_____" ], [ "datail-review 해보자\n\n- feature_names = 높이,가로 길이 이런 것들, data = 각 featuredml 값들, target = 0,1,2...예를 들면 붓꽃의 이름을 대용한 것, target_names = 각 target이 가리키는 이름이 무엇인지?", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "model_selection 모듈은 학습 데이터와 테스트 데이터 세트를 분리하거나 교차 검증 분할 및 평가, 그리고 Estimator의 하이퍼 파라미터를 튜닝하기 위한 다양한 함수와 클래스를 제공, 전체 데이터를 학습 데이터와 테스트 데이터 세트로 분리해주는 train_test_split()부터 살펴보자", "_____no_output_____" ] ], [ [ "from sklearn.datasets import load_iris\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import accuracy_score\niris=load_iris() # 붓꽃 데이터 세트 로딩\ndt_clf=DecisionTreeClassifier()\ntrain_data=iris.data # 데이터 세트에서 feature만으로 구성된 데이터가 ndarray\ntrain_label=iris.target # 데이터 세트에서 label 데이터\ndt_clf.fit(train_data, train_label) # 학습 수행중\npred=dt_clf.predict(train_data) # 예측 수행중 // 그런데 학습때 사용했던 train_data를 사용했음 -> 예측도 1 나올 것\nprint('예측도: ',accuracy_score(train_label,pred))", "예측도: 1.0\n" ] ], [ [ "- 정확도가 100% 나왔음 $\\to$ 이미 학습한 학습 데이터 세트를 기반으로 예측했기 때문. 답을 알고 있는데 같은 문제를 낸 것이나 마찬가지\n- 따라서 예측을 수행하는 데이터 세트는 학습을 수행한 학습용 데이터 세트가 아닌 전용의 테스트 데이터 세트여야 함.", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "dt_clf=DecisionTreeClassifier()\niris=load_iris()\n# train_test_split()의 반환값은 튜플 형태이다. 순차적으로 네가지 요소들을 반환한다\nX_train,X_test,y_train,y_test=train_test_split(iris.data, iris.target,test_size=0.3,random_state=121)\ndt_clf.fit(X_train,y_train)\npred = dt_clf.predict(X_test)\nprint('예측 정확도: {:.4f}'.format(accuracy_score(y_test,pred)))", "예측 정확도: 0.9556\n" ] ], [ [ "---", "_____no_output_____" ], [ "지금까지의 방법은 모델이 학습 데이터에만 과도하게 최적화되어, 실제 예측을 다른 데이터로 수행할 경우에는 예측 성능이 과도하게 떨어지는 `과적합`이 발생할 수 있다. 즉 해당 테스트 데이터에만 과적합되는 학습 모델이 만들어져 다른 테스트용 데이터가들어올 경우에는 성능이 저하된다. $\\to$ 개선하기 위해 `교차검증`을 이용해 다양한 학습과 평가를 수행해야 한다.", "_____no_output_____" ], [ "> 교차검증?", "_____no_output_____" ], [ ": 본고사 치르기 전, 여러 모의고사를 치르는 것. 즉 본고사가 테스트 데이터 세트에 대해 평가하는 것이라면 모의고사는 교차 검증에서 많은 학습과 검증 세트에서 알고리즘 학습과 평가를 수행하는 것.", "_____no_output_____" ], [ ": 학습 데이터 세트를 검증 데이터 세트와 학습 데이터 세트로 분할하여 수행한 뒤, 모든 학습/검증 과정이 완료된 후 최종적으로 성능을 평가하기 위해 테스트 데이터 세트를 마련함.", "_____no_output_____" ], [ "> K fold 교차 검증?", "_____no_output_____" ], [ ": K개의 데이터 폴드 세트를 만들어서 K번만큼 각 폴드 세트에 학습과 검증, 평가를 반복적으로 수행 / 개괄적 과정은 교재 104 참고", "_____no_output_____" ], [ "- 실습해보자", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ], [ "from sklearn.datasets import load_iris\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import KFold # 위에서는 trian_test_split을 import했었음\niris=load_iris() # 붓꽃 데이터 세트 로딩\nfeatures=iris.data\nlabel=iris.target\ndt_clf=DecisionTreeClassifier(random_state=156)\nkfold=KFold(n_splits=5) # KFold 객체 생성\ncv_accuracy=[] # fold set별 정확도를 담을 리스트 객체 생성\nprint('붓꽃 데이터 세트 크기:',features.shape[0])", "붓꽃 데이터 세트 크기: 150\n" ] ], [ [ "---", "_____no_output_____" ], [ "```python\nkfold=KFold(n_splits=5)\n```\n로 KFold객체를 생성했으니 객체의 split()을 호출해 전체 붓꽃 데이터를 5개의 fold 데이터 세트로 분리하자. 붓꽃 데이터 세트 크기가 150개니 120개는 학습용, 30개는 검증 테스트 데이터 세트이다. 
", "_____no_output_____" ] ], [ [ "n_iter=0\nfor train_index,test_index in kfold.split(features):\n # kfold.split()으로 반환된 인덱스를 이용해 학습용, 검증용 테스트 데이터 추출\n X_train, X_test = features[train_index], features[test_index]\n y_train, y_test = label[train_index], label[test_index]\n # 학습 및 예측\n dt_clf.fit(X_train, y_train)\n pred = dt_clf.predict(X_test)\n n_iter+=1\n # 반복 시마다 정확도 측정\n accuracy = np.round(accuracy_score(y_test,pred),4)\n train_size = X_train.shape[0]\n test_size = X_test.shape[0]\n print('\\n#{0} 교차 검증 정확도 :{1}, 학습 데이터 크기 :{2}, 검증 데이터 크기 :{3}'.format(n_iter,accuracy,train_size,test_size))\n print('#{0} 검증 세트 인덱스:{1}'.format(n_iter, test_index))\n cv_accuracy.append(accuracy)\n# 개별 iteration별 정확도를 합하여 평균 정확도 계산\nprint('\\n *Conclusion* 평균 검증 정확도:', np.mean(cv_accuracy))", "\n#1 교차 검증 정확도 :1.0, 학습 데이터 크기 :120, 검증 데이터 크기 :30\n#1 검증 세트 인덱스:[ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23\n 24 25 26 27 28 29]\n\n#2 교차 검증 정확도 :0.9667, 학습 데이터 크기 :120, 검증 데이터 크기 :30\n#2 검증 세트 인덱스:[30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53\n 54 55 56 57 58 59]\n\n#3 교차 검증 정확도 :0.8667, 학습 데이터 크기 :120, 검증 데이터 크기 :30\n#3 검증 세트 인덱스:[60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83\n 84 85 86 87 88 89]\n\n#4 교차 검증 정확도 :0.9333, 학습 데이터 크기 :120, 검증 데이터 크기 :30\n#4 검증 세트 인덱스:[ 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107\n 108 109 110 111 112 113 114 115 116 117 118 119]\n\n#5 교차 검증 정확도 :0.7333, 학습 데이터 크기 :120, 검증 데이터 크기 :30\n#5 검증 세트 인덱스:[120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137\n 138 139 140 141 142 143 144 145 146 147 148 149]\n\n *Conclusion* 평균 검증 정확도: 0.9\n" ] ], [ [ "----", "_____no_output_____" ], [ "- 교차 검증시마다 검증 세트의 인덱스가 달라짐을 알 수 있다. \n\n- 검증세트 인덱스를 살펴보면 104p에서 설명한 그림의 설명과 유사함", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "> Stratified K 폴드", "_____no_output_____" ], [ ": 불균형한 분포도를가진 레이블(결정 클래스) 데이터 집합을 위한 K 폴드 방식이다. 불균형한 분포도를 가진 레이블 데이터 집합은 특정 레이블 값이 특이하게 많거나 또는 적어서 분포가 한쪽으로 치우치는 것을 말함", "_____no_output_____" ], [ "가령 대출 사기 데이터를 예측한다고 가정해보자, 이 데이터 세트는 1억건이고 수십개의 feature와 대출 사기 여부를 뜻하는 label(정상 대출0, 대출사기 : 1)로 구성돼 있다. K폴드로 랜덤하게 학습 및 테스트 세트의 인덱스를 고르더라도 레이블 값인 0과1의 비율을 제대로 반영하지 못하게 됨. 
따라서 원본 데이터와 유사한 대출 사기 레이블 값의 분포를 학습/테스트 세트에도 유지하는 게 매우 중요", "_____no_output_____" ], [ "- ***Stratified K 폴드는 이처럼 K폴드가 레이블 데이터 집합이 원본 데이터 집합의 레이블 분포를 학습 및 테스트 세트에 제대로 분배하지 못하는 경우의 문제를 해결해줌***", "_____no_output_____" ], [ "붓꽃 데이터 세트를 DataFrame으로 생성하고 레이블 값의 분포도를 먼저 확인해보자", "_____no_output_____" ] ], [ [ "import pandas as pd\niris=load_iris()\niris_df=pd.DataFrame(data=iris.data,columns=iris.feature_names)\niris_df['label']=iris.target\nprint(iris_df['label'].value_counts(),'\\n')", "0 50\n1 50\n2 50\nName: label, dtype: int64 \n\n" ] ], [ [ "- label값은 모두 50개로 분배되어 있음", "_____no_output_____" ] ], [ [ "kfold=KFold(n_splits=3)\nn_iter=0\nfor train_index, test_index in kfold.split(iris_df):\n n_iter+=1\n label_train = iris_df['label'].iloc[train_index]\n label_test=iris_df['label'].iloc[test_index]\n print('## 교차 검증: {}'.format(n_iter))\n print('학습 레이블 데이터 분포:\\n', label_train.value_counts())\n print('검증 레이블 데이터 분포:\\n', label_test.value_counts())\n print('------------------------------------------------------')", "## 교차 검증: 1\n학습 레이블 데이터 분포:\n 1 50\n2 50\nName: label, dtype: int64\n검증 레이블 데이터 분포:\n 0 50\nName: label, dtype: int64\n------------------------------------------------------\n## 교차 검증: 2\n학습 레이블 데이터 분포:\n 0 50\n2 50\nName: label, dtype: int64\n검증 레이블 데이터 분포:\n 1 50\nName: label, dtype: int64\n------------------------------------------------------\n## 교차 검증: 3\n학습 레이블 데이터 분포:\n 0 50\n1 50\nName: label, dtype: int64\n검증 레이블 데이터 분포:\n 2 50\nName: label, dtype: int64\n------------------------------------------------------\n" ] ], [ [ "- 교차 검증 시마다 3개의 폴드 세트로 만들어지는 학습 레이블과 검증 레이블이 완전히 다른 값으로 추출되었다. 예를 들어 첫번째 교차 검증에서는 학습 레이블의 1,2값이 각각 50개가 추출되었고 검증 레이블의 0값이 50개 추출되었음, 즉 학습레이블은 1,2 밖에 없으므로 0의 경우는 전혀 학습하지 못함. 반대로 검증 레이블은 0밖에 없으므로 학습 모델은 절대 0을 예측하지 못함. 이런 유형으로 교차 검증 데이터 세트를 분할하면 검증 예측 정확도는 0이 될 수밖에 없다. ", "_____no_output_____" ], [ "- StratifiedKFold는 이렇게 KFold로 분할된 레이블 데이터 세트가 전체 레이블 값의 분포도를 반영하지 못하는 문제를 해결함. ", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "실습해보자", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import StratifiedKFold\nskf=StratifiedKFold(n_splits=3)\nn_iter=0\n\n# split 메소드에 인자로 feature데이터 세트뿐만 아니라 레이블 데이터 세트도 반드시 넣어줘야함\nfor train_index,test_index in skf.split(iris_df,iris_df['label']):\n n_iter+=1\n label_train=iris_df['label'].iloc[train_index]\n label_test=iris_df['label'].iloc[test_index]\n print('## 교차검증: {}'.format(n_iter))\n print('학습 레이블 데이터 분포: \\n', label_train.value_counts())\n print('검증 레이블 데이터 분포: \\n', label_test.value_counts())\n print('--------------------------------------------------------')", "## 교차검증: 1\n학습 레이블 데이터 분포: \n 2 34\n0 33\n1 33\nName: label, dtype: int64\n검증 레이블 데이터 분포: \n 0 17\n1 17\n2 16\nName: label, dtype: int64\n--------------------------------------------------------\n## 교차검증: 2\n학습 레이블 데이터 분포: \n 1 34\n0 33\n2 33\nName: label, dtype: int64\n검증 레이블 데이터 분포: \n 0 17\n2 17\n1 16\nName: label, dtype: int64\n--------------------------------------------------------\n## 교차검증: 3\n학습 레이블 데이터 분포: \n 0 34\n1 33\n2 33\nName: label, dtype: int64\n검증 레이블 데이터 분포: \n 1 17\n2 17\n0 16\nName: label, dtype: int64\n--------------------------------------------------------\n" ] ], [ [ "- 학습 레이블과 검증 레이블 데이터 값의 분포도가 동일하게 할당됐음을 알 수 있다. 
이렇게 분할이 되어야 레이블 값 0,1,2를 모두 학습할 수 있고 이에 기반해 검증을 수행할 수 있다.", "_____no_output_____" ], [ "- 이제 StratifiedKFold를 이용해 붓꽃 데이터를 교차 검증해보자", "_____no_output_____" ] ], [ [ "df_clf=DecisionTreeClassifier(random_state=156)\nskfold=StratifiedKFold(n_splits=3)\nn_iter=3\ncv_accuracy=[]\n\n# StratifiedKFol의 split() 호출시 반드시 레이블 데이터 세트도 추가 입력 필요\nfor train_index, test_ondex in skfold.split(features, label):\n # split()으로 반환된 인덱스를 이용해 학습용, 검증용 테스트 데이터 추출\n X_train,X_test=features[train_index],features[test_index]\n y_train,y_test=label[train_index], label[test_index]\n # 학습 및 예측\n df_clf.fit(X_train,y_train)\n pred=dt_clf.predict(X_test)\n # 반복시마다 정확도 측정\n n_iter+=1\n accuracy=np.around(accuracy_score(y_test,pred),4)\n train_size=X_train.shape[0]\n test_size = X_test.shape[0]\n print('\\n#{} 교차 검증 정확도 : {}, 학습 데이터 크기 : {}, 검증 데이터 크기 : {}'.format(n_iter,accuracy,train_size,test_size))\n print('#{} 검증 세트 인덱스: {}'.format(n_iter, test_index))\n cv_accuracy.append(accuracy)\n # 교차 검증별 정확도 및 평균 정확도 계산\n print('\\n## 교차 검증별 정확도:', np.around(cv_accuracy,4))\n print('## 평균 검증 정확도:',np.mean(cv_accuracy))", "\n#4 교차 검증 정확도 : 0.92, 학습 데이터 크기 : 100, 검증 데이터 크기 : 50\n#4 검증 세트 인덱스: [ 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 83 84\n 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 133 134 135\n 136 137 138 139 140 141 142 143 144 145 146 147 148 149]\n\n## 교차 검증별 정확도: [0.92]\n## 평균 검증 정확도: 0.92\n\n#5 교차 검증 정확도 : 0.92, 학습 데이터 크기 : 100, 검증 데이터 크기 : 50\n#5 검증 세트 인덱스: [ 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 83 84\n 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 133 134 135\n 136 137 138 139 140 141 142 143 144 145 146 147 148 149]\n\n## 교차 검증별 정확도: [0.92 0.92]\n## 평균 검증 정확도: 0.92\n\n#6 교차 검증 정확도 : 0.92, 학습 데이터 크기 : 100, 검증 데이터 크기 : 50\n#6 검증 세트 인덱스: [ 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 83 84\n 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 133 134 135\n 136 137 138 139 140 141 142 143 144 145 146 147 148 149]\n\n## 교차 검증별 정확도: [0.92 0.92 0.92]\n## 평균 검증 정확도: 0.92\n" ] ], [ [ "----", "_____no_output_____" ], [ "> ### ***`교차 검증을 보다 간편하게 - cross_val_score()`***", "_____no_output_____" ] ], [ [ "from sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import cross_val_score,cross_validate\nfrom sklearn.datasets import load_iris\n\niris_data=load_iris()\ndt_clf = DecisionTreeClassifier(random_state=156)\n\ndata= iris_data.data\nlabel=iris_data.target\n\n# 성능 지표는 정확도 (accuracy), 교차 검증 세트는 3개\nscores = cross_val_score(dt_clf, data, label, scoring='accuracy', cv=3)\nprint('교차 검증별 정확도: ',np.round(scores,4))\nprint('평균 검증 정확도: ',np.round(np.mean(scores),4))", "교차 검증별 정확도: [0.98 0.94 0.98]\n평균 검증 정확도: 0.9667\n" ] ], [ [ "- cv로 지정된 횟수만큼 scoring 파라미터로 지정된 평가지표로 평가 결과값을 배열로 반환", "_____no_output_____" ], [ "----", "_____no_output_____" ], [ "> ### ***`GridSearchCV - 교차 검증과 최적 하이퍼 파라미터 튜닝을 동시에`***", "_____no_output_____" ], [ "- 하이퍼 파라미터? 
머신러닝 알고리즘을 구성하는 주요 구성 요소이며, 이 값을 조정해 알고리즘의 예측 성능을 개선할 수 있음", "_____no_output_____" ] ], [ [ "from sklearn.datasets import load_iris\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import GridSearchCV\n\n# 데이터를 로딩하고 학습 데이터와 테스트 데이터 분리\niris_data = load_iris()\nX_train, X_test, y_train, y_test = train_test_split(iris_data.data,iris_data.target, test_size=0.2, random_state=121)\ndtree= DecisionTreeClassifier()\n\n# 파라미터를 딕셔너리 형태로 설정\nparameters = {'max_depth' : [1,2,3], 'min_samples_split' : [2,3]}\n\nimport pandas as pd\n# param_grid의 하이퍼 파라미터를 3개의 train, test set fold로 나누어 테스트 수행 설정\n# rifit=True가 default이며, 이때 가장 젛은 파라미터 설정으로 재학습시킴\ngrid_dtree = GridSearchCV(dtree, param_grid=parameters, cv=3, refit=True)\n# 붓꽃 학습 데이터로 param_grid의 하이퍼 파라미터를 순차적으로 학습/평가\ngrid_dtree.fit(X_train,y_train)\n#GridSearchCV 결과를 추출해 DataFrame으로 변환\nscores_df = pd.DataFrame(grid_dtree.cv_results_)\nscores_df[['params','mean_test_score','rank_test_score','split0_test_score','split1_test_score','split2_test_score']]", "_____no_output_____" ], [ "print('GridSearchCV 최적 파라미터:', grid_dtree.best_params_)\nprint('GridSearchCV 최고 정확도:{:4f}'.format(grid_dtree.best_score_))", "GridSearchCV 최적 파라미터: {'max_depth': 3, 'min_samples_split': 2}\nGridSearchCV 최고 정확도:0.975000\n" ] ], [ [ "- 인덱스 4,5rk rank_test_score가 1인 것으로 보아 공동 1위이며 예측 성능 1등을 의미함. \n- 열 4,5,6은 cv=3 이라서 열2는 그 세개의 평균을 의미", "_____no_output_____" ] ], [ [ "# GridSearchCV의 refit으로 이미 학습된 estimator 반환\nestimator = grid_dtree.best_estimator_\n# GridSearchCV의 best_estimator_는 이미 최적 학습이 됐으므로 별도 학습이 필요없음\npred = estimator.predict(X_test)\nprint('테스트 데이터 세트 정확도: {:.4f}'.format(accuracy_score(y_test,pred)))", "테스트 데이터 세트 정확도: 0.9667\n" ] ], [ [ "- 일반적으로 학습 데이터를 GridSearchCV를 이용해 최적 하이퍼 파라미터 튜닝을 수행한 뒤에 별도의 테스트 세트에서 이를 평가하는 것이 일반적인 머신 러닝 모델 적용 방법이다.", "_____no_output_____" ] ] ]
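[ [ "A hedged addition, not from the original notebook: the cv argument also accepts a splitter object, so the stratified splitting shown above can be plugged straight into cross_val_score (or GridSearchCV). A minimal sketch using only imports already seen above:", "_____no_output_____" ] ], [ [ "from sklearn.datasets import load_iris\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import StratifiedKFold, cross_val_score\nimport numpy as np\n\niris = load_iris()\nskf = StratifiedKFold(n_splits=3) # an explicit stratified splitter reused as cv\nscores = cross_val_score(DecisionTreeClassifier(random_state=156), iris.data, iris.target, scoring='accuracy', cv=skf)\nprint('per-fold accuracies:', np.round(scores, 4))\nprint('mean validation accuracy:', np.round(np.mean(scores), 4))", "_____no_output_____" ] ]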
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a7c460764387c41d0f10954073c8ebab67ce9ae
313,909
ipynb
Jupyter Notebook
day_1/exercises.ipynb
jurra/software_carpentries
bca46d858b5f8dcc12b57e2ac4e44d0483ed2977
[ "CC0-1.0" ]
null
null
null
day_1/exercises.ipynb
jurra/software_carpentries
bca46d858b5f8dcc12b57e2ac4e44d0483ed2977
[ "CC0-1.0" ]
null
null
null
day_1/exercises.ipynb
jurra/software_carpentries
bca46d858b5f8dcc12b57e2ac4e44d0483ed2977
[ "CC0-1.0" ]
1
2022-03-02T14:56:28.000Z
2022-03-02T14:56:28.000Z
180.511213
84,416
0.901089
[ [ [ "from IPython import display", "_____no_output_____" ] ], [ [ "# What to expect from the Python lessons\n- Get you started with python through a little project\n- Showcase relevant use cases of python for exploratory data analysis \n- Provide you with exercises during and after the lessons so that you practice and experience python\n- Provide you with good reads to develop further the wonderful craft of programming", "_____no_output_____" ], [ "# Coding project introduction\n\nWe will work with hypothetical clynical trial data about a drug that aims to reduce arthritis inflammation.\nThe data analysis goal is to assess the effectiveness of the treament and inform the clynical trial with data visualization taking advantage of interactive notebooks.\n\nMore details on this later ;)\n", "_____no_output_____" ], [ "## Software Carpentry - Python part 1\n\n12:45 - 0.Introduction to Jupyter notebooks \n13:00 - 1.Python fundamentals \n13:45 - Exercises in Breakout Room \n\n**14:00 - Coffee break** \n\n14:15 - 2.Loading data \n14:45 - 3.Visualizing data \n\n**15:30 - Coffee break** \n\n15:45 - 4.Repeating actions \n16:30 - Exercises in Breakout Room \n16:50 - Wrap up ", "_____no_output_____" ], [ "# Quick tour to Anaconda (Individual edition)\n\n\"**Your data science toolkit**: With over 25 million users worldwide, the open-source Individual Edition (Distribution) is the easiest way to perform Python/R data science and machine learning on a single machine. Developed for solo practitioners, it is the toolkit that equips you to work with thousands of open-source packages and libraries.\"\n\nhttps://www.anaconda.com/products/individual", "_____no_output_____" ], [ "## Anaconda installation comes with:\n* Latest version of Python\n* JupyterLab, Jupyter notebooks and several other IDE options\n* An easy-to-install collection of high performance Python libraries\n* Conda, tool for managing packages and environments\n* Collection of open source packages and tools to install over 1.5k additional packages\n* Anaconda Navigator is a graphical user interface to the conda package and environment manager and a free scientific-computing environment.", "_____no_output_____" ], [ "# Let's open up a jupyter notebook!\n\n## Method 1: Anaconda Navigator\n\n", "_____no_output_____" ], [ "## Method 2: Terminal", "_____no_output_____" ], [ "Open notebook example: [Gravitational Wave Open Science Center](https://mybinder.org/v2/gh/losc-tutorial/quickview/master?filepath=index.ipynb)", "_____no_output_____" ], [ "# Why are we using notebooks instead of an IDE like Spyder\n\n- Storytelling with documented and re-executable analysis\n- Great for exploring libraries, commands and code snippets\n- Persistence of code snippets you want to reuse and try compared to a terminal or ipython.\n\n", "_____no_output_____" ] ], [ [ "# This is a code snippet\ndef say_hello(your_name): # define say_hello function\n print('Hello ' + your_name)\n\nsay_hello('Jose') # Here we call the function", "Hello Jose\n" ] ], [ [ "## Dont confuse the interactive notebook documents with the \"jupyter notebook\" application\n- The first one is a document format that you can find with an `.ipynb` file extension\n- The second is an application where you can use such documents\n- Interactive notebooks or `ipynb`s are very related to `.ipython`an interactive shell.", "_____no_output_____" ], [ "Open notebook example: [Gravitational Wave Open Science Center](https://mybinder.org/v2/gh/losc-tutorial/quickview/master?filepath=index.ipynb)", 
"_____no_output_____" ], [ "# 1. Python fundamentals", "_____no_output_____" ], [ "## Lets get started with python and see what it can do \n- Open your jupyter notebook application \n- Go to `day_1/` directory and open the `exercises.ipynb` notebook.\n", "_____no_output_____" ], [ "### How to use the `exercises.ipynb`\n\n- Lets write our first code snippet in a cell\n- Lets execute code (pressing run or pressing `Shift + Enter`)\n- Lets write some markdown in another cell", "_____no_output_____" ] ], [ [ "# Write your first code snippet", "_____no_output_____" ], [ "# Write your first markdown cell", "_____no_output_____" ] ], [ [ "# Things you should know about python for Today\n* Easy to code, It is a very easy to learn language.\n* It has a lot of reusable code that allows you to do powerful things (libraries)\n* High-Level, when we write python code you dont have to remember systems architecture\n* Open source and highly extendable!\n* Indentation and spaces make a difference\n\n", "_____no_output_____" ] ], [ [ "# In this snippet we demonstrate how indentation works in python\nnames = ['Curie', 'Darwin', 'Turing']\n\n\nfor name in names:\nprint(name)", "_____no_output_____" ] ], [ [ "## Variables and data types in python\nCreate variables to describe patient characteristics:\n- Patient identifier\n- Patient name\n- Age\n- Weight\n- Specificatin of inflammations status (is inflammated, is not inflammated)", "_____no_output_____" ] ], [ [ "# Demonstrate multiple assignment with two patient names", "_____no_output_____" ], [ "# Demonstrate variables valid and invalid names \n# Examples 1_patient vs patient_1\n\n## patient name = \"Peter Pan\" # This will not work\n## 1_patient_name = \"Peter Pan\" # This will not work\n\n# Trying data types\n\n# Try integer type with patient_1 age\n\n# Try floating points with patient's weight\n\n# a patient_id string\n\n# Boolean, does patient \"x\" has had inflammation?\n\n# Get information about data types using a built in function\n", "_____no_output_____" ] ], [ [ "# Combining everything we have learned into a patients dataset\n- We will create a list of patients\n- And start populating and manipulating that list with python", "_____no_output_____" ] ], [ [ "# Working with lists data types\n# First lets make an empty list\n\n# Lets populate the list with our previous values assigned to the patients variables\n\n# Grouping more data points per patient, we only have names\n\n# A patients list with different data points per patient\n\n# Demonstrate a two dimensional list: [[], [], []]\n", "_____no_output_____" ], [ "# Organizing the same data using dictionaries\n\n# Manipulating a list and adding entries to the list", "_____no_output_____" ] ], [ [ "# What have we done so far?\n- We know how to assign different data types to different values\n- We know how to compose more complex data types by aggregating the basic data types or primitive data types\n- We can use the notebook now to play around and try code snippets\n- We know how to use `print()` python builtin function to interact display results as outputs", "_____no_output_____" ], [ "# Lets put everything we have learned together into a little app to enter patients data into a list\n\n- We will introduce 4 patients to the patients list\n- Try to stick to the same names I am using...", "_____no_output_____" ] ], [ [ "# We can use input like this\n# patient_name = input()", "_____no_output_____" ], [ "# Lets clear our list again and start using the input() to populate the list\n# patients = []", 
"_____no_output_____" ], [ "# Create a patient id\n\n# Using the input() function:\n\n# Enter patients name and add the entry to patients list\n\n# Enter patients age and add the entry to patients list\n\n# Enter patients weight and add the entry to patients list\n\n# Specify if patient was inflammated\n\n## Lets run the cell 4 times to get a dummy dataset", "_____no_output_____" ], [ "# Saving the data into a csv vile\n", "_____no_output_____" ], [ "# We can recreate our list in case it disappears from memory\npatients = [[0, 'Peter Pan', '40', '80', 'True'],\n [1, 'Wendy Fuller', '30', '60', 'False'],\n [2, 'Mariam McWire', '37', '70', 'False'],\n [3, 'Robin Hood', '60', '80', 'True']]", "_____no_output_____" ], [ "# Check and read the file we created\n", "_____no_output_____" ] ], [ [ "# Checking data types to store data according to our original specifications", "_____no_output_____" ] ], [ [ "# Lets check the types and see if the the data complies with requirements we have defined upfront\n\n# Another way of checking with the == operator\n\n# Do it with a for loop to inspect the data\n", "_____no_output_____" ] ], [ [ "## Here we have a problem of types that dont match what we defined at the beginning\n- Age should be an integer\n- Weight should be a float\n- And is_inflammated should be a boolean type\n\nHold this issue, we will come back to it later ;)", "_____no_output_____" ], [ "# Breakout Session 1", "_____no_output_____" ], [ "# Exercise 1 - Slicing strings\nA section of an array is called a _slice_. We can take slices of character strings as well:", "_____no_output_____" ] ], [ [ "element = \"oxygen\"\nprint('first three characters:', element[0:3])\nprint('last three characters:', element[3:6])", "first three characters: oxy\nlast three characters: gen\n" ] ], [ [ "What is the value of `element[4]`? What about `element[4:]`? Or `element[:]`?\nWhat is `element[-1]`? What is `element[-3:]`?", "_____no_output_____" ] ], [ [ "# Your solution", "_____no_output_____" ] ], [ [ "# Excercise 2 - Slicing lists with steps\n\nSo far we’ve seen how to use slicing to take single blocks of successive entries from a sequence. But what if we want to take a subset of entries that aren’t next to each other in the sequence? You can achieve this by providing a third argument to the range within the brackets, called the step size. \n\nThe full sytax for creating slices is `[begin:end:step]`, although you most often find a short-hand notation as we've seen in Exercise 1. \n\nThe example below shows how you can take every third entry in a list:", "_____no_output_____" ] ], [ [ "primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]\nsubset = primes[0:12:3]\nprint('subset', subset)", "subset [2, 7, 17, 29]\n" ] ], [ [ "Given the following list of months:", "_____no_output_____" ] ], [ [ "months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']", "_____no_output_____" ] ], [ [ "### Questions\n\n1. What slice of `months` will produce the following output `['jan', 'mar', 'may', 'jul', 'sep', nov']`?\n\n1. Given the short-hand notation we used for the character string in Exercise 1 (i.e. `element[:2] == 'element[0:2]`), can you find the short-hand notation for question 1? What do you find easier to read?\n\n1. 
Using the step size parameter, can you think of a way to reverse the list?", "_____no_output_____" ] ], [ [ "# Your solution", "_____no_output_____" ] ], [ [ "# Back to our code project: analysing patient data\n\n\n### Arthritis Inflammation\n\nWe are studying inflammation in patients who have been given a new treatment for arthritis.\n\nThere are 60 patients, who had their inflammation levels recorded for 40 days. We want to analyze these recordings to study the effect of the new arthritis treatment.\n\nTo see how the treatment is affecting the patients in general, we would like to:\n\n1. Calculate the average inflammation per day across all patients.\n1. Plot the result to discuss and share with colleagues.", "_____no_output_____" ] ], [ [ "display.Image(\"../img/lesson-overview.png\")", "_____no_output_____" ] ], [ [ "What if we need the maximum inflammation for each patient over all days (as in the next diagram on the left) or the average for each day (as in the diagram on the right)?", "_____no_output_____" ] ], [ [ "display.Image(\"../img/python-operations-across-axes.png\")", "_____no_output_____" ] ], [ [ "# 2. Analysing patient data\nLets have a look at the dataset first", "_____no_output_____" ] ], [ [ "# NumPy is a Python library used for operations with matrices and arrays\n# Numpy comes pre-installed with anaconda\nimport numpy", "_____no_output_____" ], [ "data = numpy.loadtxt(fname='data/inflammation-01.csv', delimiter=',')\nprint(data) # Shows small part of the dataset\n\n# test type of value holded by data variable\nprint(type(data))\nprint(data.dtype) # Highlight OOP\nprint(data.shape)", "[[0. 0. 1. ... 3. 0. 0.]\n [0. 1. 2. ... 1. 0. 1.]\n [0. 1. 1. ... 2. 1. 1.]\n ...\n [0. 1. 1. ... 1. 1. 1.]\n [0. 0. 0. ... 0. 2. 0.]\n [0. 0. 1. ... 1. 1. 0.]]\n<class 'numpy.ndarray'>\nfloat64\n(60, 40)\n" ], [ "display.Image(\"../img/accessing_elements.png\", width=\"500\")", "_____no_output_____" ], [ "# Indexing an array\n\n# Slicing data", "first element in data: 0.0\n[[0. 0. 1. 3. 1. 2. 4. 7. 8. 3.]\n [0. 1. 2. 1. 2. 1. 3. 2. 2. 6.]\n [0. 1. 1. 3. 3. 2. 6. 2. 5. 9.]\n [0. 0. 2. 0. 4. 2. 2. 1. 6. 7.]]\n[[2. 3. 0. 0.]\n [1. 1. 0. 1.]\n [2. 2. 1. 1.]]\n" ], [ "display.Image(\"../img/slicing-a-2d-numpy-array.png\", width=\"200\")", "_____no_output_____" ] ], [ [ "```python\nsquare_array[1:3,1:4] # slice the array as it is in the figure\n```", "_____no_output_____" ], [ "# Talk about libraries and OOP a bit\n- For Today the most important to know about object oriented programing, is that is a way of structuring code in a certain way.\n- Python is OOP and it is particularly noticeable when you are reusing libraries like numpy.\n", "_____no_output_____" ] ], [ [ "# Using methods fromt he class numpy\n", "_____no_output_____" ], [ "# Useful statistics, assign the variables using multi assignment\n", "maximum inflammation: 20.0\nmininum inflammation: 0.0\nstandard deviation: 4.613833197118566\n" ] ], [ [ "When analysing data, though, we often want to look at variations in statistical values, such as the maximum inflammation per patient or the average inflammation per day.", "_____no_output_____" ] ], [ [ "# Mean per day over all patients\n\n", "[ 0. 0.45 1.11666667 1.75 2.43333333 3.15\n 3.8 3.88333333 5.23333333 5.51666667 5.95 5.9\n 8.35 7.73333333 8.36666667 9.5 9.58333333 10.63333333\n 11.56666667 12.35 13.25 11.96666667 11.03333333 10.16666667\n 10. 
8.66666667 9.15 7.25 7.33333333 6.58333333\n 6.06666667 5.95 5.11666667 3.6 3.3 3.56666667\n 2.48333333 1.5 1.13333333 0.56666667]\n(40,)\n" ], [ "# The average inflammation per patient across all days\n\n# As a quick check, we can look at the shape\n", "[5.45 5.425 6.1 5.9 5.55 6.225 5.975 6.65 6.625 6.525 6.775 5.8\n 6.225 5.75 5.225 6.3 6.55 5.7 5.85 6.55 5.775 5.825 6.175 6.1\n 5.8 6.425 6.05 6.025 6.175 6.55 6.175 6.35 6.725 6.125 7.075 5.725\n 5.925 6.15 6.075 5.75 5.975 5.725 6.3 5.9 6.75 5.925 7.225 6.15\n 5.95 6.275 5.7 6.1 6.825 5.975 6.725 5.7 6.25 6.4 7.05 5.9 ]\n(60,)\n" ] ], [ [ "## Using pandas to show how libraries build on top of each other to add value\nPandas is built on top of NumPy, which means the Python pandas package depends on the NumPy package and also pandas intended with many other 3rd party libraries.", "_____no_output_____" ] ], [ [ "# Use pandas to show how to handle array data \n# Explain that pandas are based on numpy arrays\n\n", "_____no_output_____" ] ], [ [ "### Key points\n* Import a library into a program using import libraryname.\n* Use the `numpy` library to work with arrays in Python.\n* Use array `[x, y]` to select a single element from a 2D array.\n* Use `numpy.mean(array)`, `numpy.max(array)`, and `numpy.min(array)` to calculate simple statistics.\n* Use `numpy.mean(array, axis=0)` or `numpy.mean(array, axis=1)` to calculate statistics across the specified axis.\\\n\n### Questions?", "_____no_output_____" ], [ "# 3. Visualizing Tabular Data", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot\n# Let's make our lives easier with an alias\n## import matplotlib.pyplot as plt", "_____no_output_____" ], [ "# Plot entire dataset as a heatmap", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n\n# Plot the mean inflammation per day over all patients", "_____no_output_____" ] ], [ [ "This data looks suspicious! I would not expect to find a sharp peak in an average of the dataset. Very unlikely that the inflammation of all patients spikes on day 18. Let's look at two other statistics: max and min", "_____no_output_____" ] ], [ [ "# max method of numpy", "_____no_output_____" ], [ "# min method of numpy", "_____no_output_____" ], [ "# Setup subplots", "_____no_output_____" ] ], [ [ "### Try to do this on your own later:\nCreate `inflammation_analysis.ipynb` \nLet's organize our analysis in a new notebook\n\n**Steps:**\n1. Set up subplots\n2. Load data\n3. Plot data\n4. Add labels\n5. Save the figure", "_____no_output_____" ], [ "### Key points\n* Use the `pyplot` module from the `matplotlib` library for creating simple visualizations.\n* Use an alias when importing lengthy library names, e.g. `import matplotlib.pyplot as plt`\n* Create subplots and add labels\n\n### Questions?", "_____no_output_____" ], [ "# 4. Repeating actions", "_____no_output_____" ] ], [ [ "# Simple examples of for loops", "_____no_output_____" ] ], [ [ "### Looping over multiple data files\n\nAs a final piece to processing our inflammation data, we need a way to get a list of all the files in our data directory whose names start with `inflammation-` and end with `.csv`. 
The following library will help us to achieve this:", "_____no_output_____" ] ], [ [ "# Loop analysis over filenames", "_____no_output_____" ] ], [ [ "### Key points\n* Use `for variable in sequence` to process the elements of a sequence one at a time.\n* The body of a `for` loop must be indented.\n* Use `glob.glob(pattern)` to create a list of files whose names match a pattern.\n* Use `*` in a pattern to match zero or more characters, and `?` to match any single character.\n\n### Questions?", "_____no_output_____" ], [ "# Breakout Session 2", "_____no_output_____" ], [ "# Exercise 3 - Change in inflammation\n\nThe patient data is longitudinal in the sense that each row represents a series of observations relating to one individual. This means that the change in inflammation over time is a meaningful concept.\n\nThe `numpy.diff()` function takes an array and returns the differences between two successive values. Let’s use it to examine the changes each day across the first week of patient 3 from our inflammation dataset:", "_____no_output_____" ] ], [ [ "# Load data\nimport numpy\ndata = numpy.loadtxt(fname='data/inflammation-01.csv', delimiter=',')", "_____no_output_____" ], [ "patient3_week1 = data[3, :7]\nprint(patient3_week1)", "[0. 0. 2. 0. 4. 2. 2.]\n" ], [ "patient3_week1 = data[3, :7]\nprint(patient3_week1)", "[0. 0. 2. 0. 4. 2. 2.]\n" ] ], [ [ "Calling `numpy.diff(patient3_week1)` would do the following calculations", "_____no_output_____" ] ], [ [ "numpy.diff(patient3_week1)", "_____no_output_____" ] ], [ [ "### Questions\n\n1. When calling `numpy.diff()` with a multi-dimensional array, an axis argument may be passed to the function to specify which axis to process. When applying `numpy.diff()` to our 2D inflammation array data, which axis would we specify?\n", "_____no_output_____" ] ], [ [ "# Your solution", "_____no_output_____" ] ], [ [ "# Exercise 4 - Plotting differences\n\nPlot the difference between the average inflammations reported in the first and second datasets (stored in `inflammation-01.csv` and `inflammation-02.csv`, correspondingly), i.e., the difference between the leftmost plots of the first two figures we have plotted so far. \n\nSteps:\n1. Import libraries\n1. Import data\n1. Calculate difference\n1. Create and annotate figure", "_____no_output_____" ] ], [ [ "# Your solution", "_____no_output_____" ] ], [ [ "# End of lesson", "_____no_output_____" ] ] ]
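[ [ "A hedged sketch of one possible body for the empty \"Loop analysis over filenames\" cell above, assuming the inflammation CSV files live under data/ as in the earlier cells:", "_____no_output_____" ] ], [ [ "import glob\nimport numpy\n\n# glob.glob returns the matching paths; sorted() makes the order reproducible\nfilenames = sorted(glob.glob('data/inflammation-*.csv'))\nfor filename in filenames:\n    data = numpy.loadtxt(fname=filename, delimiter=',')\n    # one summary number per file keeps the loop output readable\n    print(filename, 'mean inflammation:', numpy.mean(data))", "_____no_output_____" ] ]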
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a7c5ca3ca263d5946112d38888d4ba7b011fe12
8,130
ipynb
Jupyter Notebook
src/main/model_benchmark/resnettransferlearning.ipynb
hanzopgp/JetsonAutonomousDriving
b08600041e787ea0e79eb054d6a3efd99f3cfd8d
[ "MIT" ]
null
null
null
src/main/model_benchmark/resnettransferlearning.ipynb
hanzopgp/JetsonAutonomousDriving
b08600041e787ea0e79eb054d6a3efd99f3cfd8d
[ "MIT" ]
null
null
null
src/main/model_benchmark/resnettransferlearning.ipynb
hanzopgp/JetsonAutonomousDriving
b08600041e787ea0e79eb054d6a3efd99f3cfd8d
[ "MIT" ]
null
null
null
8,130
8,130
0.661009
[ [ [ "import numpy as np\nfrom tqdm import tqdm\nfrom time import time\n\nimport torchvision\nfrom torchvision import models, transforms\n\nimport torch\nfrom torch import nn\nfrom torch.utils.tensorboard import SummaryWriter", "_____no_output_____" ], [ "def accuracy(yhat,y):\n # si y encode les indexes\n if len(y.shape)==1 or y.size(1)==1:\n return (torch.argmax(yhat,1).view(y.size(0),-1)== y.view(-1,1)).double().mean()\n # si y est encodé en onehot\n return (torch.argmax(yhat,1).view(-1) == torch.argmax(y,1).view(-1)).double().mean()\n\ndef train(model,epochs,train_loader,test_loader,feature_extract=False):\n model = model.to(device)\n writer = SummaryWriter(f\"{TB_PATH}/{model.name}\")\n \n params_to_update = model.parameters()\n print(\"params to learn:\")\n if feature_extract:\n params_to_update = []\n for name,param in model.named_parameters():\n if param.requires_grad == True:\n params_to_update.append(param)\n print(\"\\t\",name)\n else:\n for name,param in model.named_parameters():\n if param.requires_grad == True:\n print(\"\\t\",name)\n optim = torch.optim.Adam(params_to_update,lr=1e-3)\n \n print(f\"running {model.name}\")\n loss = nn.CrossEntropyLoss()\n for epoch in tqdm(range(epochs)):\n cumloss, cumacc, count = 0, 0, 0\n model.train()\n for x,y in train_loader:\n optim.zero_grad()\n x,y = x.to(device), y.to(device)\n yhat = model(x)\n l = loss(yhat,y)\n l.backward()\n optim.step()\n cumloss += l*len(x)\n cumacc += accuracy(yhat,y)*len(x)\n count += len(x)\n writer.add_scalar('loss/train',cumloss/count,epoch)\n writer.add_scalar('accuracy/train',cumacc/count,epoch)\n if epoch % 1 == 0:\n model.eval()\n with torch.no_grad():\n cumloss, cumacc, count = 0, 0, 0\n for x,y in test_loader:\n x,y = x.to(device), y.to(device)\n yhat = model(x)\n cumloss += loss(yhat,y)*len(x)\n cumacc += accuracy(yhat,y)*len(x)\n count += len(x)\n writer.add_scalar(f'loss/test',cumloss/count,epoch)\n writer.add_scalar('accuracy/test',cumacc/count,epoch)\n\ndef set_parameter_requires_grad(model, feature_extract):\n if feature_extract:\n for name,p in model.named_parameters():\n if \"fc\" not in name:\n p.requires_grad = False \n else:\n p.requires_grad = True \n \ndef get_test_data(dataloader, size):\n X_test, Y_test = next(iter(dataloader))\n batch_size = len(X_test)\n n = size//batch_size\n for i, batch in enumerate(dataloader):\n if i < n:\n X_tmp, Y_tmp = batch\n X_test = torch.cat((X_test, X_tmp), 0)\n Y_test = torch.cat((Y_test, Y_tmp), 0)\n return X_test, Y_test", "_____no_output_____" ], [ "TB_PATH = \"/tmp/logs/sceance2\"\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nresnet = models.resnet18(pretrained=True)\n\nresnet.fc = nn.Linear(512, 10)\n\nprint(resnet.eval())\n\nset_parameter_requires_grad(resnet, True)", "_____no_output_____" ], [ "input_size = 224\nbatch_size = 128\n\nmean=[0.485, 0.456, 0.406]\nstd=[0.229, 0.224, 0.225]\n\ntransformresnetTrain=transforms.Compose([ # Cette fois on utilise pas de grayscale car nous avons un gros modele pré-entrainé\n transforms.RandomResizedCrop(input_size), # selection aléatoire d'une zone de la taille voulue (augmentation des données en apprentissage)\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ])\ntransformresnetTest=transforms.Compose([\n transforms.Resize(input_size), # selection de la zone centrale de la taille voulue\n transforms.CenterCrop(input_size),\n transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ])\n\nresnet_trainset = 
torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transformresnetTrain)\nresnet_trainloader = torch.utils.data.DataLoader(resnet_trainset, batch_size=batch_size, pin_memory=True, shuffle=True)\n\nresnet_testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transformresnetTest)\nresnet_testloader = torch.utils.data.DataLoader(resnet_testset, batch_size=batch_size, pin_memory=True, shuffle=True)", "_____no_output_____" ], [ "## Entraînement du réseau\nresnet.name = \"resnet\"\ntrain(resnet, 1, resnet_trainloader, resnet_testloader)", "_____no_output_____" ], [ "## Accuracy\nX_test, Y_test = get_test_data(resnet_testloader, 1000) \nX_test, Y_test = X_test.to(device), Y_test.to(device)\nprint(\"Acc for resnet transfer learning :\", accuracy(resnet(X_test), Y_test))", "_____no_output_____" ], [ "for t in (20,40,60,80,100,120):\n t0 = time()\n resnet(X_test[:t])\n print(\"FPS:\", t, \" --> seconds:\", (time() - t0))", "_____no_output_____" ], [ "import os\nPATH = \"./\"\ntorch.save(resnet.state_dict(), os.path.join(PATH,\"resnet.pth\"))", "_____no_output_____" ], [ "PATH = \"./\"\nmodel = models.resnet18(pretrained=True)\nmodel.fc = nn.Linear(512, 10)\nmodel.load_state_dict(torch.load(os.path.join(PATH,\"vgg.pth\"),map_location='cpu'))\n\nmodel.eval() \ndummy_input = torch.randn(batch_size, 3, input_size, input_size) \ntorch.onnx.export(model, \n dummy_input, \n \"vgg.onnx\",\n export_params=True,\n do_constant_folding=True, \n input_names = ['modelInput'],\n output_names = ['modelOutput'])\nprint(\" \") \nprint('Model has been converted to ONNX') ", "_____no_output_____" ] ] ]
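[ [ "A hedged note on the timing loop above: without torch.cuda.synchronize() the measured times can be dominated by asynchronous kernel launches, and autograd bookkeeping is included. A minimal sketch of a fairer measurement, assuming the resnet and X_test variables defined above:", "_____no_output_____" ] ], [ [ "import torch\nfrom time import time\n\nresnet.eval()\nwith torch.no_grad(): # no autograd bookkeeping while timing inference\n    for n in (20, 40, 60, 80, 100, 120):\n        if torch.cuda.is_available():\n            torch.cuda.synchronize() # finish pending GPU work before starting the clock\n        t0 = time()\n        resnet(X_test[:n])\n        if torch.cuda.is_available():\n            torch.cuda.synchronize() # wait for this forward pass to actually finish\n        dt = time() - t0\n        print('batch size:', n, '--> seconds:', dt, '--> images/s:', n / dt)", "_____no_output_____" ] ]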
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a7c668794a4b066b61c6e49e25bf4609f3783e7
149,695
ipynb
Jupyter Notebook
2-Data-Wrangling/data-wrangling.ipynb
Syed-Sherjeel/Data-Analysis-with-Python
dbd08b0d2179714e171d0e71aa50f0b191e753dc
[ "MIT" ]
null
null
null
2-Data-Wrangling/data-wrangling.ipynb
Syed-Sherjeel/Data-Analysis-with-Python
dbd08b0d2179714e171d0e71aa50f0b191e753dc
[ "MIT" ]
null
null
null
2-Data-Wrangling/data-wrangling.ipynb
Syed-Sherjeel/Data-Analysis-with-Python
dbd08b0d2179714e171d0e71aa50f0b191e753dc
[ "MIT" ]
null
null
null
36.825338
8,804
0.479776
[ [ [ "<a href=\"https://www.bigdatauniversity.com\"><img src = \"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/Images/CCLog.png\" width = 300, align = \"center\"></a>\n\n<h1 align=center><font size=5>Data Analysis with Python</font></h1>", "_____no_output_____" ], [ "<h1>Data Wrangling</h1>", "_____no_output_____" ], [ "<h3>Welcome!</h3>\n\nBy the end of this notebook, you will have learned the basics of Data Wrangling! ", "_____no_output_____" ], [ "<h2>Table of content</h2>\n\n<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n<ul>\n <li><a href=\"#identify_handle_missing_values\">Identify and handle missing values</a>\n <ul>\n <li><a href=\"#identify_missing_values\">Identify missing values</a></li>\n <li><a href=\"#deal_missing_values\">Deal with missing values</a></li>\n <li><a href=\"#correct_data_format\">Correct data format</a></li>\n </ul>\n </li>\n <li><a href=\"#data_standardization\">Data standardization</a></li>\n <li><a href=\"#data_normalization\">Data Normalization (centering/scaling)</a></li>\n <li><a href=\"#binning\">Binning</a></li>\n <li><a href=\"#indicator\">Indicator variable</a></li>\n</ul>\n \nEstimated Time Needed: <strong>30 min</strong>\n</div>\n \n<hr>", "_____no_output_____" ], [ "<h2>What is the purpose of Data Wrangling?</h2>", "_____no_output_____" ], [ "Data Wrangling is the process of converting data from the initial format to a format that may be better for analysis.", "_____no_output_____" ], [ "<h3>What is the fuel consumption (L/100k) rate for the diesel car?</h3>", "_____no_output_____" ], [ "<h3>Import data</h3>\n<p>\nYou can find the \"Automobile Data Set\" from the following link: <a href=\"https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data\">https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data</a>. \nWe will be using this data set throughout this course.\n</p>", "_____no_output_____" ], [ "<h4>Import pandas</h4> ", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport matplotlib.pylab as plt", "_____no_output_____" ] ], [ [ "<h2>Reading the data set from the URL and adding the related headers.</h2>", "_____no_output_____" ], [ "URL of the dataset", "_____no_output_____" ], [ "This dataset was hosted on IBM Cloud object click <a href=\"https://cocl.us/corsera_da0101en_notebook_bottom\">HERE</a> for free storage ", "_____no_output_____" ] ], [ [ "filename = \"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/auto.csv\"", "_____no_output_____" ] ], [ [ " Python list <b>headers</b> containing name of headers ", "_____no_output_____" ] ], [ [ "headers = [\"symboling\",\"normalized-losses\",\"make\",\"fuel-type\",\"aspiration\", \"num-of-doors\",\"body-style\",\n \"drive-wheels\",\"engine-location\",\"wheel-base\", \"length\",\"width\",\"height\",\"curb-weight\",\"engine-type\",\n \"num-of-cylinders\", \"engine-size\",\"fuel-system\",\"bore\",\"stroke\",\"compression-ratio\",\"horsepower\",\n \"peak-rpm\",\"city-mpg\",\"highway-mpg\",\"price\"]", "_____no_output_____" ] ], [ [ "Use the Pandas method <b>read_csv()</b> to load the data from the web address. Set the parameter \"names\" equal to the Python list \"headers\".", "_____no_output_____" ] ], [ [ "df = pd.read_csv(filename, names = headers)", "_____no_output_____" ] ], [ [ " Use the method <b>head()</b> to display the first five rows of the dataframe. 
", "_____no_output_____" ] ], [ [ "# To see what the data set looks like, we'll use the head() method.\ndf.head()", "_____no_output_____" ] ], [ [ "As we can see, several question marks appeared in the dataframe; those are missing values which may hinder our further analysis. \n<div>So, how do we identify all those missing values and deal with them?</div> \n\n\n<b>How to work with missing data?</b>\n\nSteps for working with missing data:\n<ol>\n <li>dentify missing data</li>\n <li>deal with missing data</li>\n <li>correct data format</li>\n</ol>", "_____no_output_____" ], [ "<h2 id=\"identify_handle_missing_values\">Identify and handle missing values</h2>\n\n\n<h3 id=\"identify_missing_values\">Identify missing values</h3>\n<h4>Convert \"?\" to NaN</h4>\nIn the car dataset, missing data comes with the question mark \"?\".\nWe replace \"?\" with NaN (Not a Number), which is Python's default missing value marker, for reasons of computational speed and convenience. Here we use the function: \n <pre>.replace(A, B, inplace = True) </pre>\nto replace A by B", "_____no_output_____" ] ], [ [ "import numpy as np\n\n# replace \"?\" to NaN\ndf.replace(\"?\", np.nan, inplace = True)\ndf.head(5)", "_____no_output_____" ] ], [ [ "dentify_missing_values\n\n<h4>Evaluating for Missing Data</h4>\n\nThe missing values are converted to Python's default. We use Python's built-in functions to identify these missing values. There are two methods to detect missing data:\n<ol>\n <li><b>.isnull()</b></li>\n <li><b>.notnull()</b></li>\n</ol>\nThe output is a boolean value indicating whether the value that is passed into the argument is in fact missing data.", "_____no_output_____" ] ], [ [ "missing_data = df.isnull()\nmissing_data.head(5)", "_____no_output_____" ] ], [ [ "\"True\" stands for missing value, while \"False\" stands for not missing value.", "_____no_output_____" ], [ "<h4>Count missing values in each column</h4>\n<p>\nUsing a for loop in Python, we can quickly figure out the number of missing values in each column. As mentioned above, \"True\" represents a missing value, \"False\" means the value is present in the dataset. In the body of the for loop the method \".value_counts()\" counts the number of \"True\" values. 
\n</p>", "_____no_output_____" ] ], [ [ "for column in missing_data.columns.values.tolist():\n print(column)\n print (missing_data[column].value_counts())\n print(\"\") ", "symboling\nFalse 205\nName: symboling, dtype: int64\n\nnormalized-losses\nFalse 164\nTrue 41\nName: normalized-losses, dtype: int64\n\nmake\nFalse 205\nName: make, dtype: int64\n\nfuel-type\nFalse 205\nName: fuel-type, dtype: int64\n\naspiration\nFalse 205\nName: aspiration, dtype: int64\n\nnum-of-doors\nFalse 203\nTrue 2\nName: num-of-doors, dtype: int64\n\nbody-style\nFalse 205\nName: body-style, dtype: int64\n\ndrive-wheels\nFalse 205\nName: drive-wheels, dtype: int64\n\nengine-location\nFalse 205\nName: engine-location, dtype: int64\n\nwheel-base\nFalse 205\nName: wheel-base, dtype: int64\n\nlength\nFalse 205\nName: length, dtype: int64\n\nwidth\nFalse 205\nName: width, dtype: int64\n\nheight\nFalse 205\nName: height, dtype: int64\n\ncurb-weight\nFalse 205\nName: curb-weight, dtype: int64\n\nengine-type\nFalse 205\nName: engine-type, dtype: int64\n\nnum-of-cylinders\nFalse 205\nName: num-of-cylinders, dtype: int64\n\nengine-size\nFalse 205\nName: engine-size, dtype: int64\n\nfuel-system\nFalse 205\nName: fuel-system, dtype: int64\n\nbore\nFalse 201\nTrue 4\nName: bore, dtype: int64\n\nstroke\nFalse 201\nTrue 4\nName: stroke, dtype: int64\n\ncompression-ratio\nFalse 205\nName: compression-ratio, dtype: int64\n\nhorsepower\nFalse 203\nTrue 2\nName: horsepower, dtype: int64\n\npeak-rpm\nFalse 203\nTrue 2\nName: peak-rpm, dtype: int64\n\ncity-mpg\nFalse 205\nName: city-mpg, dtype: int64\n\nhighway-mpg\nFalse 205\nName: highway-mpg, dtype: int64\n\nprice\nFalse 201\nTrue 4\nName: price, dtype: int64\n\n" ] ], [ [ "Based on the summary above, each column has 205 rows of data, seven columns containing missing data:\n<ol>\n <li>\"normalized-losses\": 41 missing data</li>\n <li>\"num-of-doors\": 2 missing data</li>\n <li>\"bore\": 4 missing data</li>\n <li>\"stroke\" : 4 missing data</li>\n <li>\"horsepower\": 2 missing data</li>\n <li>\"peak-rpm\": 2 missing data</li>\n <li>\"price\": 4 missing data</li>\n</ol>", "_____no_output_____" ], [ "<h3 id=\"deal_missing_values\">Deal with missing data</h3>\n<b>How to deal with missing data?</b>\n\n<ol>\n <li>drop data<br>\n a. drop the whole row<br>\n b. drop the whole column\n </li>\n <li>replace data<br>\n a. replace it by mean<br>\n b. replace it by frequency<br>\n c. replace it based on other functions\n </li>\n</ol>", "_____no_output_____" ], [ "Whole columns should be dropped only if most entries in the column are empty. In our dataset, none of the columns are empty enough to drop entirely.\nWe have some freedom in choosing which method to replace data; however, some methods may seem more reasonable than others. We will apply each method to many different columns:\n\n<b>Replace by mean:</b>\n<ul>\n <li>\"normalized-losses\": 41 missing data, replace them with mean</li>\n <li>\"stroke\": 4 missing data, replace them with mean</li>\n <li>\"bore\": 4 missing data, replace them with mean</li>\n <li>\"horsepower\": 2 missing data, replace them with mean</li>\n <li>\"peak-rpm\": 2 missing data, replace them with mean</li>\n</ul>\n\n<b>Replace by frequency:</b>\n<ul>\n <li>\"num-of-doors\": 2 missing data, replace them with \"four\". \n <ul>\n <li>Reason: 84% sedans is four doors. 
Since four doors is most frequent, it is most likely to occur</li>\n </ul>\n </li>\n</ul>\n\n<b>Drop the whole row:</b>\n<ul>\n <li>\"price\": 4 missing values, simply delete the whole row\n <ul>\n <li>Reason: price is what we want to predict. Any data entry without price data cannot be used for prediction; therefore any row without price data is not useful to us</li>\n </ul>\n </li>\n</ul>", "_____no_output_____" ], [ "<h4>Calculate the average of the column </h4>", "_____no_output_____" ] ], [ [ "avg_norm_loss = df[\"normalized-losses\"].astype(\"float\").mean(axis=0)\nprint(\"Average of normalized-losses:\", avg_norm_loss)", "Average of normalized-losses: 122.0\n" ] ], [ [ "<h4>Replace \"NaN\" by mean value in \"normalized-losses\" column</h4>", "_____no_output_____" ] ], [ [ "df[\"normalized-losses\"].replace(np.nan, avg_norm_loss, inplace=True)", "_____no_output_____" ] ], [ [ "<h4>Calculate the mean value for 'bore' column</h4>", "_____no_output_____" ] ], [ [ "avg_bore=df['bore'].astype('float').mean(axis=0)\nprint(\"Average of bore:\", avg_bore)", "Average of bore: 3.3297512437810943\n" ] ], [ [ "<h4>Replace NaN by mean value</h4>", "_____no_output_____" ] ], [ [ "df[\"bore\"].replace(np.nan, avg_bore, inplace=True)", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-danger alertdanger\" style=\"margin-top: 20px\">\n<h1> Question #1: </h1>\n\n<b>According to the example above, replace NaN in \"stroke\" column by mean.</b>\n</div>", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute \nmean_stroke = df['stroke'].astype(\"float\").mean(axis=0)\ndf['stroke'].replace(np.nan, mean_stroke, inplace=True)", "_____no_output_____" ] ], [ [ "Double-click <b>here</b> for the solution.\n\n<!-- The answer is below:\n\n# calculate the mean value for \"stroke\" column\navg_stroke = df[\"stroke\"].astype(\"float\").mean(axis = 0)\nprint(\"Average of stroke:\", avg_stroke)\n\n# replace NaN by mean value in \"stroke\" column\ndf[\"stroke\"].replace(np.nan, avg_stroke, inplace = True)\n\n-->\n", "_____no_output_____" ], [ "<h4>Calculate the mean value for the 'horsepower' column:</h4>", "_____no_output_____" ] ], [ [ "avg_horsepower = df['horsepower'].astype('float').mean(axis=0)\nprint(\"Average horsepower:\", avg_horsepower)", "Average horsepower: 104.25615763546799\n" ] ], [ [ "<h4>Replace \"NaN\" by mean value:</h4>", "_____no_output_____" ] ], [ [ "df['horsepower'].replace(np.nan, avg_horsepower, inplace=True)", "_____no_output_____" ] ], [ [ "<h4>Calculate the mean value for 'peak-rpm' column:</h4>", "_____no_output_____" ] ], [ [ "avg_peakrpm=df['peak-rpm'].astype('float').mean(axis=0)\nprint(\"Average peak rpm:\", avg_peakrpm)", "Average peak rpm: 5125.369458128079\n" ] ], [ [ "<h4>Replace NaN by mean value:</h4>", "_____no_output_____" ] ], [ [ "df['peak-rpm'].replace(np.nan, avg_peakrpm, inplace=True)", "_____no_output_____" ] ], [ [ "To see which values are present in a particular column, we can use the \".value_counts()\" method:", "_____no_output_____" ] ], [ [ "df['num-of-doors'].value_counts()", "_____no_output_____" ] ], [ [ "We can see that four doors are the most common type.
We can also use the \".idxmax()\" method to calculate for us the most common type automatically:", "_____no_output_____" ] ], [ [ "df['num-of-doors'].value_counts().idxmax()", "_____no_output_____" ] ], [ [ "The replacement procedure is very similar to what we have seen previously", "_____no_output_____" ] ], [ [ "#replace the missing 'num-of-doors' values by the most frequent \ndf[\"num-of-doors\"].replace(np.nan, \"four\", inplace=True)", "_____no_output_____" ] ], [ [ "Finally, let's drop all rows that do not have price data:", "_____no_output_____" ] ], [ [ "# simply drop whole row with NaN in \"price\" column\ndf.dropna(subset=[\"price\"], axis=0, inplace=True)\n\n# reset index, because we droped two rows\ndf.reset_index(drop=True, inplace=True)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ], [ [ "<b>Good!</b> Now, we obtain the dataset with no missing values.", "_____no_output_____" ], [ "<h3 id=\"correct_data_format\">Correct data format</h3>\n<b>We are almost there!</b>\n<p>The last step in data cleaning is checking and making sure that all data is in the correct format (int, float, text or other).</p>\n\nIn Pandas, we use \n<p><b>.dtype()</b> to check the data type</p>\n<p><b>.astype()</b> to change the data type</p>", "_____no_output_____" ], [ "<h4>Lets list the data types for each column</h4>", "_____no_output_____" ] ], [ [ "df.dtypes", "_____no_output_____" ] ], [ [ "<p>As we can see above, some columns are not of the correct data type. Numerical variables should have type 'float' or 'int', and variables with strings such as categories should have type 'object'. For example, 'bore' and 'stroke' variables are numerical values that describe the engines, so we should expect them to be of the type 'float' or 'int'; however, they are shown as type 'object'. We have to convert data types into a proper format for each column using the \"astype()\" method.</p> ", "_____no_output_____" ], [ "<h4>Convert data types to proper format</h4>", "_____no_output_____" ] ], [ [ "df[[\"bore\", \"stroke\"]] = df[[\"bore\", \"stroke\"]].astype(\"float\")\ndf[[\"normalized-losses\"]] = df[[\"normalized-losses\"]].astype(\"int\")\ndf[[\"price\"]] = df[[\"price\"]].astype(\"float\")\ndf[[\"peak-rpm\"]] = df[[\"peak-rpm\"]].astype(\"float\")", "_____no_output_____" ] ], [ [ "<h4>Let us list the columns after the conversion</h4>", "_____no_output_____" ] ], [ [ "df.dtypes", "_____no_output_____" ] ], [ [ "<b>Wonderful!</b>\n\nNow, we finally obtain the cleaned dataset with no missing values and all data in its proper format.", "_____no_output_____" ], [ "<h2 id=\"data_standardization\">Data Standardization</h2>\n<p>\nData is usually collected from different agencies with different formats.\n(Data Standardization is also a term for a particular type of data normalization, where we subtract the mean and divide by the standard deviation)\n</p>\n \n<b>What is Standardization?</b>\n<p>Standardization is the process of transforming data into a common format which allows the researcher to make the meaningful comparison.\n</p>\n\n<b>Example</b>\n<p>Transform mpg to L/100km:</p>\n<p>In our dataset, the fuel consumption columns \"city-mpg\" and \"highway-mpg\" are represented by mpg (miles per gallon) unit. 
Assume we are developing an application in a country that accepts fuel consumption in the L/100km standard</p>\n<p>We will need to apply <b>data transformation</b> to transform mpg into L/100km.</p>\n", "_____no_output_____" ], [ "<p>The formula for unit conversion is</p>\nL/100km = 235 / mpg\n<p>We can do many mathematical operations directly in Pandas.</p>", "_____no_output_____" ] ], [ [ "df.head()", "_____no_output_____" ], [ "# Convert mpg to L/100km by mathematical operation (235 divided by mpg)\ndf['city-L/100km'] = 235/df[\"city-mpg\"]\n\n# check your transformed data \ndf.head()", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-danger alertdanger\" style=\"margin-top: 20px\">\n<h1> Question #2: </h1>\n\n<b>According to the example above, transform mpg to L/100km in the column of \"highway-mpg\", and change the name of column to \"highway-L/100km\".</b>\n</div>", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute \ndf[\"highway-L/100km\"] = 235/df[\"highway-mpg\"]\n# the converted values live in the new \"highway-L/100km\" column, so no rename is needed\ndf.head()", "_____no_output_____" ] ], [ [ "Double-click <b>here</b> for the solution.\n\n<!-- The answer is below:\n\n# transform mpg to L/100km by mathematical operation (235 divided by mpg)\ndf[\"highway-mpg\"] = 235/df[\"highway-mpg\"]\n\n# rename column name from \"highway-mpg\" to \"highway-L/100km\"\ndf.rename(columns={'highway-mpg':'highway-L/100km'}, inplace=True)\n\n# check your transformed data \ndf.head()\n\n-->\n", "_____no_output_____" ], [ "<h2 id=\"data_normalization\">Data Normalization</h2>\n\n<b>Why normalization?</b>\n<p>Normalization is the process of transforming values of several variables into a similar range. Typical normalizations include scaling the variable so the variable average is 0, scaling the variable so the variance is 1, or scaling the variable so its values range from 0 to 1.\n</p>\n\n<b>Example</b>\n<p>To demonstrate normalization, let's say we want to scale the columns \"length\", \"width\" and \"height\" </p>\n<p><b>Target:</b> normalize those variables so their values range from 0 to 1.</p>\n<p><b>Approach:</b> replace original value by (original value)/(maximum value)</p>", "_____no_output_____" ] ], [ [ "# replace (original value) by (original value)/(maximum value)\ndf['length'] = df['length']/df['length'].max()\ndf['width'] = df['width']/df['width'].max()", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-danger alertdanger\" style=\"margin-top: 20px\">\n<h1> Question #3: </h1>\n\n<b>According to the example above, normalize the column \"height\".</b>\n</div>", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute \ndf['height']=df['height']/df['height'].max()\ndf[['height']].head()", "_____no_output_____" ] ], [ [ "Double-click <b>here</b> for the solution.\n\n<!-- The answer is below:\n\ndf['height'] = df['height']/df['height'].max() \n# show the scaled columns\ndf[[\"length\",\"width\",\"height\"]].head()\n\n-->", "_____no_output_____" ], [ "Here we can see that we've normalized \"length\", \"width\" and \"height\" to the range [0,1].", "_____no_output_____" ], [ "<h2 id=\"binning\">Binning</h2>\n<b>Why binning?</b>\n<p>\n Binning is a process of transforming continuous numerical variables into discrete categorical 'bins', for grouped analysis.\n</p>\n\n<b>Example: </b>\n<p>In our dataset, \"horsepower\" is a real-valued variable ranging from 48 to 288; it has 57 unique values.
What if we only care about the price difference between cars with high horsepower, medium horsepower, and little horsepower (3 types)? Can we rearrange them into three 'bins' to simplify analysis? </p>\n\n<p>We will use the Pandas method 'cut' to segment the 'horsepower' column into 3 bins </p>\n\n", "_____no_output_____" ], [ "<h3>Example of Binning Data In Pandas</h3>", "_____no_output_____" ], [ " Convert data to correct format ", "_____no_output_____" ] ], [ [ "df[\"horsepower\"]=df[\"horsepower\"].astype(int, copy=True)", "_____no_output_____" ] ], [ [ "Let's plot the histogram of horsepower, to see what the distribution of horsepower looks like.", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib as plt\nfrom matplotlib import pyplot\nplt.pyplot.hist(df[\"horsepower\"])\n\n# set x/y labels and plot title\nplt.pyplot.xlabel(\"horsepower\")\nplt.pyplot.ylabel(\"count\")\nplt.pyplot.title(\"horsepower bins\")", "_____no_output_____" ] ], [ [ "<p>We would like 3 bins of equal bandwidth, so we use numpy's <code>linspace(start_value, end_value, numbers_generated)</code> function.</p>\n<p>Since we want to include the minimum value of horsepower we want to set start_value=min(df[\"horsepower\"]).</p>\n<p>Since we want to include the maximum value of horsepower we want to set end_value=max(df[\"horsepower\"]).</p>\n<p>Since we are building 3 bins of equal length, there should be 4 dividers, so numbers_generated=4.</p>", "_____no_output_____" ], [ "We build a bin array from the minimum value to the maximum value, using the bandwidth calculated above. The bins will be the values used to determine when one bin ends and another begins.", "_____no_output_____" ] ], [ [ "bins = np.linspace(min(df[\"horsepower\"]), max(df[\"horsepower\"]), 4)\nbins", "_____no_output_____" ] ], [ [ " We set group names:", "_____no_output_____" ] ], [ [ "group_names = ['Low', 'Medium', 'High']", "_____no_output_____" ] ], [ [ " We apply the function \"cut\" to determine which bin each value of \"df['horsepower']\" belongs to. ", "_____no_output_____" ] ], [ [ "df['horsepower-binned'] = pd.cut(df['horsepower'], bins, labels=group_names, include_lowest=True )\ndf[['horsepower','horsepower-binned']].head(20)", "_____no_output_____" ] ], [ [ "Let's see the number of vehicles in each bin.", "_____no_output_____" ] ], [ [ "df[\"horsepower-binned\"].value_counts()", "_____no_output_____" ] ], [ [ "Let's plot the distribution of each bin.", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib as plt\nfrom matplotlib import pyplot\npyplot.bar(group_names, df[\"horsepower-binned\"].value_counts())\n\n# set x/y labels and plot title\nplt.pyplot.xlabel(\"horsepower\")\nplt.pyplot.ylabel(\"count\")\nplt.pyplot.title(\"horsepower bins\")", "_____no_output_____" ] ], [ [ "<p>\n Check the dataframe above carefully; you will find the last column provides the bins for \"horsepower\" with 3 categories (\"Low\",\"Medium\" and \"High\"). \n</p>\n<p>\n We successfully narrowed the intervals from 57 to 3!\n</p>", "_____no_output_____" ], [ "<h3>Bins visualization</h3>\nNormally, a histogram is used to visualize the distribution of bins we created above.
", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib as plt\nfrom matplotlib import pyplot\n\na = (0,1,2)\n\n# draw historgram of attribute \"horsepower\" with bins = 3\nplt.pyplot.hist(df[\"horsepower\"], bins = 3)\n\n# set x/y labels and plot title\nplt.pyplot.xlabel(\"horsepower\")\nplt.pyplot.ylabel(\"count\")\nplt.pyplot.title(\"horsepower bins\")", "_____no_output_____" ] ], [ [ "The plot above shows the binning result for attribute \"horsepower\". ", "_____no_output_____" ], [ "<h2 id=\"indicator\">Indicator variable (or dummy variable)</h2>\n<b>What is an indicator variable?</b>\n<p>\n An indicator variable (or dummy variable) is a numerical variable used to label categories. They are called 'dummies' because the numbers themselves don't have inherent meaning. \n</p>\n\n<b>Why we use indicator variables?</b>\n<p>\n So we can use categorical variables for regression analysis in the later modules.\n</p>\n<b>Example</b>\n<p>\n We see the column \"fuel-type\" has two unique values, \"gas\" or \"diesel\". Regression doesn't understand words, only numbers. To use this attribute in regression analysis, we convert \"fuel-type\" into indicator variables.\n</p>\n\n<p>\n We will use the panda's method 'get_dummies' to assign numerical values to different categories of fuel type. \n</p>", "_____no_output_____" ] ], [ [ "df.columns", "_____no_output_____" ] ], [ [ "get indicator variables and assign it to data frame \"dummy_variable_1\" ", "_____no_output_____" ] ], [ [ "dummy_variable_1 = pd.get_dummies(df[\"fuel-type\"])\ndummy_variable_1.head()", "_____no_output_____" ] ], [ [ "change column names for clarity ", "_____no_output_____" ] ], [ [ "dummy_variable_1.rename(columns={'fuel-type-diesel':'gas', 'fuel-type-diesel':'diesel'}, inplace=True)\ndummy_variable_1.head()", "_____no_output_____" ] ], [ [ "We now have the value 0 to represent \"gas\" and 1 to represent \"diesel\" in the column \"fuel-type\". We will now insert this column back into our original dataset. ", "_____no_output_____" ] ], [ [ "# merge data frame \"df\" and \"dummy_variable_1\" \ndf = pd.concat([df, dummy_variable_1], axis=1)\n\n# drop original column \"fuel-type\" from \"df\"\ndf.drop(\"fuel-type\", axis = 1, inplace=True)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ], [ [ "The last two columns are now the indicator variable representation of the fuel-type variable. 
It's all 0s and 1s now.", "_____no_output_____" ], [ "<div class=\"alert alert-danger alertdanger\" style=\"margin-top: 20px\">\n<h1> Question #4: </h1>\n\n<b>As above, create indicator variables for the column \"aspiration\": \"std\" maps to 0, while \"turbo\" maps to 1.</b>\n</div>", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute \ndummy_variable_2=pd.get_dummies(df['aspiration'])\ndummy_variable_2.rename(columns={'std':'aspiration-std','turbo':'aspiration-turbo'},inplace=True)\ndummy_variable_2.head()", "_____no_output_____" ] ], [ [ "Double-click <b>here</b> for the solution.\n\n<!-- The answer is below:\n\n# get indicator variables of aspiration and assign them to data frame \"dummy_variable_2\"\ndummy_variable_2 = pd.get_dummies(df['aspiration'])\n\n# change column names for clarity\ndummy_variable_2.rename(columns={'std':'aspiration-std', 'turbo': 'aspiration-turbo'}, inplace=True)\n\n# show first 5 instances of data frame \"dummy_variable_2\"\ndummy_variable_2.head()\n\n-->", "_____no_output_____" ], [ " <div class=\"alert alert-danger alertdanger\" style=\"margin-top: 20px\">\n<h1> Question #5: </h1>\n\n<b>Merge the new dataframe to the original dataframe then drop the column 'aspiration'</b>\n</div>", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute \ndf=pd.concat([df,dummy_variable_2],axis=1)\ndf.drop('aspiration', axis=1, inplace=True)\ndf.head()", "_____no_output_____" ] ], [ [ "Double-click <b>here</b> for the solution.\n\n<!-- The answer is below:\n\n# merge the new dataframe to the original dataframe\ndf = pd.concat([df, dummy_variable_2], axis=1)\n\n# drop original column \"aspiration\" from \"df\"\ndf.drop('aspiration', axis = 1, inplace=True)\n\n-->", "_____no_output_____" ], [ "Save the new CSV file ", "_____no_output_____" ] ], [ [ "df.to_csv('clean_df.csv')", "_____no_output_____" ] ] ]
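The notebook above names three typical normalization schemes but only demonstrates simple feature scaling. A minimal sketch of all three, assuming `df` is the cleaned automobile dataframe from that notebook:

```python
# Hedged illustration: three common ways to normalize the 'length' column
simple = df['length'] / df['length'].max()                                                   # simple feature scaling: range (0, 1]
min_max = (df['length'] - df['length'].min()) / (df['length'].max() - df['length'].min())   # min-max: range [0, 1]
z_score = (df['length'] - df['length'].mean()) / df['length'].std()                         # standardization: mean 0, std 1
```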
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
4a7c71c57fa44c3a8f7abe009b4e128938df5712
9,556
ipynb
Jupyter Notebook
source/SpringBoChap_PureFoodNet.ipynb
chairiq/FoodCNNs
fefd0eefb42401369894beefc71208d199a6876c
[ "MIT" ]
7
2020-05-19T13:27:58.000Z
2022-01-24T06:02:25.000Z
source/SpringBoChap_PureFoodNet.ipynb
chairiq/FoodCNNs
fefd0eefb42401369894beefc71208d199a6876c
[ "MIT" ]
null
null
null
source/SpringBoChap_PureFoodNet.ipynb
chairiq/FoodCNNs
fefd0eefb42401369894beefc71208d199a6876c
[ "MIT" ]
2
2020-07-10T08:06:32.000Z
2021-04-23T16:16:16.000Z
32.39322
131
0.530138
[ [ [ "## PureFoodNet implementation", "_____no_output_____" ] ], [ [ "#libraries\nfrom tensorflow import keras\nfrom tensorflow.keras.optimizers import Adam, RMSprop \nfrom tensorflow.keras.models import Sequential \nfrom tensorflow.keras.layers import Dense, Activation, Dropout, Flatten, Conv2D\nfrom tensorflow.keras.layers import MaxPool2D, BatchNormalization, GlobalAveragePooling2D\nfrom tensorflow.keras.regularizers import l2\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger, EarlyStopping, ReduceLROnPlateau, LearningRateScheduler\n\nK.clear_session()", "_____no_output_____" ], [ "class PureFoodNet:\n # The model\n def getModel(input_shape=(224,224,3), num_classes=3):\n \n model = Sequential()\n \n #Block 1\n model.add(Conv2D(input_shape = input_shape,\n filters = 128, kernel_size = (5,5), strides = 2, padding = 'Same', name='block1_conv1',\n activation ='relu', kernel_initializer='he_normal'))\n model.add(Conv2D(filters = 128, kernel_size = (5,5), strides = 2, padding = 'Same', name='block1_conv2',\n activation ='relu',kernel_initializer='he_normal'))\n model.add(MaxPool2D(strides=(2, 2), name='block1_pool'))\n model.add(BatchNormalization())\n model.add(Dropout(0.25))\n \n #Block 2\n model.add(Conv2D(filters = 256, kernel_size = (3,3),padding = 'Same', name='block2_conv1',\n activation ='relu',kernel_initializer='he_normal'))\n model.add(Conv2D(filters = 256, kernel_size = (3,3),padding = 'Same', name='block2_conv2',\n activation ='relu',kernel_initializer='he_normal'))\n model.add(Conv2D(filters = 256, kernel_size = (3,3),padding = 'Same', name='block2_conv3',\n activation ='relu',kernel_initializer='he_normal'))\n model.add(MaxPool2D(strides=(2, 2), name='block2_pool'))\n model.add(BatchNormalization())\n model.add(Dropout(0.35))\n \n #Block 3\n model.add(Conv2D(filters = 512, kernel_size = (3,3),padding = 'Same', name='block3_conv1',\n activation ='relu',kernel_initializer='he_normal'))\n model.add(Conv2D(filters = 512, kernel_size = (3,3),padding = 'Same', name='block3_conv2',\n activation ='relu',kernel_initializer='he_normal'))\n model.add(Conv2D(filters = 512, kernel_size = (3,3),padding = 'Same', name='block3_conv3',\n activation ='relu',kernel_initializer='he_normal'))\n model.add(MaxPool2D(strides=(2, 2), name='block3_pool'))\n model.add(BatchNormalization())\n model.add(Dropout(0.35))\n \n #Block 4\n model.add(GlobalAveragePooling2D())\n model.add(Dense(512, activation = \"relu\", kernel_initializer='he_normal'))\n model.add(Dropout(0.4))\n model.add(Dense(num_classes,\n activation = \"softmax\",\n kernel_initializer='he_normal',\n kernel_regularizer=l2()))\n\n return model", "_____no_output_____" ], [ "img_width, img_height = 299, 299\ntrain_data_dir = 'food-101/train/'\nvalidation_data_dir = 'food-101/test/'\nspecific_classes = None #['apple_pie', 'greek_salad', 'baklava']\nbatch_size = 128\n\ntrain_datagen = ImageDataGenerator(\n rescale=1. / 255,\n rotation_range=10,\n width_shift_range=0.05,\n height_shift_range=0.05,\n shear_range=0.2,\n zoom_range=0.2,\n channel_shift_range=10,\n horizontal_flip=True,\n fill_mode='constant' \n)\n\ntest_datagen = ImageDataGenerator(rescale=1. 
/ 255)\n\ntrain_generator = train_datagen.flow_from_directory(\n classes = specific_classes,\n directory = train_data_dir,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode='categorical')\n\nvalidation_generator = test_datagen.flow_from_directory(\n classes = specific_classes,\n directory = validation_data_dir,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode='categorical')\n\nnb_train_samples = train_generator.n\nnb_validation_samples = validation_generator.n\nn_classes = train_generator.num_classes\n\nmodel_name = 'PureFoodNet_299x299Nadam_2'\nepoch_num = 50", "_____no_output_____" ], [ "model = PureFoodNet.getModel(input_shape=train_generator.image_shape,\n num_classes = n_classes)\nmodel.summary()", "_____no_output_____" ], [ "# learning rate scheduler\ndef schedule(epoch):\n if epoch < 10:\n new_lr = .001\n elif epoch < 14:\n new_lr = .0006\n elif epoch < 17:\n new_lr = .0003\n elif epoch < 20:\n new_lr = .0001\n elif epoch < 23:\n new_lr = .00005\n else:\n new_lr = .00001\n \n print(\"\\nLR at epoch {} = {} \\n\".format(epoch,new_lr))\n return new_lr\n \nlr_scheduler = LearningRateScheduler(schedule)", "_____no_output_____" ], [ "model.compile(optimizer='Nadam', \n loss='categorical_crossentropy', \n metrics=['accuracy','top_k_categorical_accuracy'])\n\ncheckpointer = ModelCheckpoint(filepath='best_model_food101_'+model_name+'.hdf5',\n verbose=1,\n save_best_only=True)\n\ncsv_logger = CSVLogger('hist_food101_'+model_name+'.log')\n", "_____no_output_____" ], [ "hist = model.fit_generator(train_generator,\n steps_per_epoch = nb_train_samples // batch_size,\n validation_data = validation_generator,\n validation_steps = nb_validation_samples // batch_size,\n epochs = epoch_num,\n verbose = 1,\n callbacks = [csv_logger, checkpointer, lr_scheduler]\n )", "_____no_output_____" ] ] ]
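A hedged sketch of how the checkpoint saved above might be used for inference afterwards. The checkpoint path matches the `ModelCheckpoint` filepath in the notebook, but the test image path is hypothetical and the preprocessing (rescale by 1/255, resize to the training size) is an assumption mirroring the training generators:

```python
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
import numpy as np

# Assumes training above produced this checkpoint file
model = load_model('best_model_food101_' + model_name + '.hdf5')

# 'some_food_photo.jpg' is a hypothetical input image
img = image.load_img('some_food_photo.jpg', target_size=(img_height, img_width))
x = image.img_to_array(img)[np.newaxis] / 255.0

probs = model.predict(x)[0]
# Recover class names in index order from the training generator
class_names = sorted(train_generator.class_indices, key=train_generator.class_indices.get)
print(class_names[np.argmax(probs)], probs.max())
```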
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
4a7c7a21bd485c1ecac81190cdd570bdc00b3e4a
63,454
ipynb
Jupyter Notebook
3. Norms and Distances.ipynb
csoehnel/DSR-FundamentalsOfML
6f7dd08ff9549ef2c0da5f8b453c2f6fa42cce98
[ "MIT" ]
null
null
null
3. Norms and Distances.ipynb
csoehnel/DSR-FundamentalsOfML
6f7dd08ff9549ef2c0da5f8b453c2f6fa42cce98
[ "MIT" ]
null
null
null
3. Norms and Distances.ipynb
csoehnel/DSR-FundamentalsOfML
6f7dd08ff9549ef2c0da5f8b453c2f6fa42cce98
[ "MIT" ]
null
null
null
185.538012
53,604
0.896744
[ [ [ "from IPython.display import HTML\ncss_file = './custom.css'\nHTML(open(css_file, \"r\").read())", "_____no_output_____" ] ], [ [ "# Norms and Distances\n\n© 2018 Daniel Voigt Godoy", "_____no_output_____" ], [ "## 1. Definition\n\nFrom [Wikipedia](https://en.wikipedia.org/wiki/Norm_(mathematics)):\n\n ...a norm is a function that assigns a strictly positive length or size to each vector in a vector space — except for the zero vector, which is assigned a length of zero.\n \n### 1.1 Euclidean Distance\n\nYou probably know the most common norm of them all: $\\ell_2$ norm (or distance). This is the ***Euclidean Distance*** commonly referred to as the distance between two points:\n\n$$\n\\ell_2 = ||x||_2 = \\sqrt{|x_1|^2 + \\dots + |x_n|^2} = \\sqrt{\\sum_{i=1}^n|x_i|^2}\n$$\n\n![euclidean distance](https://upload.wikimedia.org/wikipedia/commons/thumb/1/10/Euclidean_distance_3d_2_cropped.png/295px-Euclidean_distance_3d_2_cropped.png)\n<center>Source: Wikipedia</center>\n\n### 1.2 Manhattan Distance\n\nYou may also have heard of the $\\ell_1$ norm (or distance). This is called ***Manhattan Distance***:\n\n$$\n\\ell_1 = ||x||_1 = |x_1| + \\dots + |x_n| = \\sum_{i=1}^n|x_i|\n$$\n\n![manhattan distance](https://upload.wikimedia.org/wikipedia/commons/thumb/0/08/Manhattan_distance.svg/240px-Manhattan_distance.svg.png)\n<center>Source: Wikipedia</center>\n\n### 1.3 Minkowski Distance of order *p*\n\nThere is a pattern to it... you add up all elements exponentiated to the \"number\" of the norm (1 or 2 in the examples above), then you take the \"number\"-root of the result.\n\nIf we say this \"number\" is $p$, we can write the formula like this:\n\n$$\n||\\boldsymbol{x}||_p = \\bigg(\\sum_{i=1}^{n}|x_i|^p\\bigg)^{\\frac{1}{p}}\n$$\n\n### 1.4 Infinity Norm\n\nThis is a special case, which is equivalent to taking the maximum absolute value of all values:\n\n$$\n||\\boldsymbol{x}||_{\\infty} = max(|x_1|, \\dots, |x_n|)\n$$", "_____no_output_____" ], [ "## 2. Experiment\n\nTime to try it yourself!\n\nThe slider below allows you to change $p$ to get the contour plots for different norms.\n\nUse the slider to play with different configurations and answer the ***questions*** below.", "_____no_output_____" ] ], [ [ "from intuitiveml.algebra.Norm import *\nfrom intuitiveml.utils import gen_button", "_____no_output_____" ], [ "norm = plotNorm()\nvb = VBox(build_figure(norm), layout={'align_items': 'center'})", "_____no_output_____" ], [ "vb", "_____no_output_____" ] ], [ [ "#### Questions\n\n1. What happens to the general ***level*** of values (look at the colorscale) as $p$ increases?\n2. Let's compare Manhattan to Euclidean distances:\n - Using ***Manhattan Distance***, hover your mouse over any point along the ***x axis*** (y = 0) and note its coordinates: its Z value is the computed distance.\n - Using ***Euclidean Distance***, go to the same point and note its coordinates. What happens to the computed distance? Did it get bigger / smaller?\n - Repeat the process, but this time choose a point along the ***diagonal*** (x and y having the same value). How do the distances compare to each other?", "_____no_output_____" ], [ "1.) Has full range of color values at l1. Increasing norm = reducing overall scale.\n\n2.) weights are on x, y axis of above graph -> distance to origin is norm (distance measured by according distance to norm, e.g. manhattan for l1)", "_____no_output_____" ], [ "## 3. 
Comparing Norms", "_____no_output_____" ], [ "Here are plots for different $p$-norms, side by side, for easier comparison.\n\nIt is also possible to have $p$ values smaller than one, which yield \"pointy\" figures like the first one.\n\nOn the opposite end, if we use a $p$ value of a 100, it is already pretty close to the depicting the ***maximum*** value of the coordinates (as expected for the ***infinity norm***)", "_____no_output_____" ] ], [ [ "f = plot_norms()", "_____no_output_____" ] ], [ [ "## 4. Numpy\n\n[np.linalg.norm](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.norm.html)", "_____no_output_____" ], [ "#### This material is copyright Daniel Voigt Godoy and made available under the Creative Commons Attribution (CC-BY) license ([link](https://creativecommons.org/licenses/by/4.0/)). \n\n#### Code is also made available under the MIT License ([link](https://opensource.org/licenses/MIT)).", "_____no_output_____" ] ], [ [ "from IPython.display import HTML\nHTML('''<script>\n function code_toggle() {\n if (code_shown){\n $('div.input').hide('500');\n $('#toggleButton').val('Show Code')\n } else {\n $('div.input').show('500');\n $('#toggleButton').val('Hide Code')\n }\n code_shown = !code_shown\n }\n\n $( document ).ready(function(){\n code_shown=false;\n $('div.input').hide()\n });\n</script>\n<form action=\"javascript:code_toggle()\"><input type=\"submit\" id=\"toggleButton\" value=\"Show Code\"></form>''')", "_____no_output_____" ] ] ]
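The final section of the notebook above links to `np.linalg.norm` without showing it in action. A minimal sketch of the norms discussed there (the example vector is arbitrary):

```python
import numpy as np

x = np.array([3.0, -4.0])             # arbitrary example vector
l1 = np.linalg.norm(x, ord=1)         # Manhattan: |3| + |-4| = 7
l2 = np.linalg.norm(x)                # Euclidean (the default): sqrt(9 + 16) = 5
linf = np.linalg.norm(x, ord=np.inf)  # infinity norm: max(|3|, |-4|) = 4
print(l1, l2, linf)                   # 7.0 5.0 4.0
```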
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
4a7c7c2f3afeb046b2b6f5496df8172044927704
2,711
ipynb
Jupyter Notebook
locale/examples/00-load/read-dolfin.ipynb
tkoyama010/pyvista-doc-translations
23bb813387b7f8bfe17e86c2244d5dd2243990db
[ "MIT" ]
4
2020-08-07T08:19:19.000Z
2020-12-04T09:51:11.000Z
locale/examples/00-load/read-dolfin.ipynb
tkoyama010/pyvista-doc-translations
23bb813387b7f8bfe17e86c2244d5dd2243990db
[ "MIT" ]
19
2020-08-06T00:24:30.000Z
2022-03-30T19:22:24.000Z
locale/examples/00-load/read-dolfin.ipynb
tkoyama010/pyvista-doc-translations
23bb813387b7f8bfe17e86c2244d5dd2243990db
[ "MIT" ]
1
2021-03-09T07:50:40.000Z
2021-03-09T07:50:40.000Z
25.101852
180
0.517521
[ [ [ "%matplotlib inline\nfrom pyvista import set_plot_theme\nset_plot_theme('document')", "_____no_output_____" ] ], [ [ "\n# Read FEniCS/Dolfin Meshes\n\nPyVista leverages `meshio`_ to read many mesh formats not natively supported\nby VTK including the `FEniCS/Dolfin`_ XML format.\n\n\n", "_____no_output_____" ] ], [ [ "import pyvista as pv\nfrom pyvista import examples", "_____no_output_____" ] ], [ [ "Let's download an example FEniCS/Dolfin mesh from our example data\nrepository. This will download an XML Dolfin mesh and save it to PyVista's\ndata directory.\n\n", "_____no_output_____" ] ], [ [ "saved_file, _ = examples.downloads._download_file(\"dolfin_fine.xml\")\nprint(saved_file)", "_____no_output_____" ] ], [ [ "As shown, we now have an XML Dolfin mesh save locally. This filename can be\npassed directly to PyVista's :func:`pyvista.read` method to be read into\na PyVista mesh.\n\n", "_____no_output_____" ] ], [ [ "dolfin = pv.read(saved_file)\ndolfin", "_____no_output_____" ] ], [ [ "Now we can do stuff with that Dolfin mesh!\n\n", "_____no_output_____" ] ], [ [ "qual = dolfin.compute_cell_quality()\nqual.plot(show_edges=True, cpos=\"xy\")", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a7c8e50e043f9ff9a129c5772e876e685f34cc2
34,943
ipynb
Jupyter Notebook
rnn_from_scratch.ipynb
medhavempati/DeepLearning
eb7d239872a8445b29ac797b99c71f0276475314
[ "MIT" ]
null
null
null
rnn_from_scratch.ipynb
medhavempati/DeepLearning
eb7d239872a8445b29ac797b99c71f0276475314
[ "MIT" ]
null
null
null
rnn_from_scratch.ipynb
medhavempati/DeepLearning
eb7d239872a8445b29ac797b99c71f0276475314
[ "MIT" ]
null
null
null
32.810329
139
0.498927
[ [ [ "import numpy as np\nfrom collections import defaultdict\nfrom torch.utils import data\n\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "# Generate Dataset\nnp.random.seed(42)", "_____no_output_____" ], [ "def generate_dataset(num_sequences=2**8):\n sequences = []\n for _ in range(num_sequences):\n token_length = np.random.randint(1, 12)\n sequence = f'{\"a\"*token_length}{\"b\"*token_length}EOS'\n sequences.append(sequence)\n \n return sequences", "_____no_output_____" ], [ "def word_encoding(sequences):\n \n # Get 1D list of all words in all sequences\n flatten = lambda l: [item for sublist in l for item in sublist]\n all_words = flatten(sequences)\n \n # Create dictionary mapping word to word frequency across all sequences\n word_to_count = defaultdict(int)\n for word in all_words:\n word_to_count[word] += 1\n word_to_count = sorted(list(word_to_count.items()), key=lambda l: -l[1]) # sorting according to frequency\n \n # List of unique words\n dictionary = [item[0] for item in word_to_count]\n dictionary.append('UNK')\n \n # Calculate lengths\n num_sequences = len(sequences)\n vocab_size = len(dictionary)\n \n # Make word to index and index to word mappings\n word_to_idx = defaultdict(lambda: vocab_size-1)\n idx_to_word = defaultdict(lambda: 'UNK')\n for idx, word in enumerate(dictionary):\n word_to_idx[word] = idx\n idx_to_word[idx] = word\n \n return word_to_idx, idx_to_word, vocab_size", "_____no_output_____" ], [ "def one_hot_encode(idx, vocab_size):\n \"\"\"\n One-hot encodes a single word given its index and the size of the vocabulary.\n \n Args:\n `idx`: the index of the given word\n `vocab_size`: the size of the vocabulary\n \n Returns a 1-D numpy array of length `vocab_size`.\n \"\"\"\n # Initialize the encoded array\n one_hot = np.zeros(vocab_size)\n \n # Set the appropriate element to one\n one_hot[idx] = 1.0\n\n return one_hot\n\n\ndef one_hot_encode_sequence(sequence, vocab_size, word_to_idx):\n \"\"\"\n One-hot encodes a sequence of words given a fixed vocabulary size.\n \n Args:\n `sentence`: a list of words to encode\n `vocab_size`: the size of the vocabulary\n \n Returns a 3-D numpy array of shape (num words, vocab size, 1).\n \"\"\"\n # Encode each word in the sentence\n encoding = np.array([one_hot_encode(word_to_idx[word], vocab_size) for word in sequence])\n\n # Reshape encoding s.t. 
it has shape (num words, vocab size, 1)\n encoding = encoding.reshape(encoding.shape[0], encoding.shape[1], 1)\n \n return encoding", "_____no_output_____" ], [ "class Dataset(data.Dataset):\n def __init__(self, inputs, targets):\n self.X = inputs\n self.y = targets\n\n def __len__(self):\n return len(self.X)\n\n def __getitem__(self, index):\n return self.X[index], self.y[index]", "_____no_output_____" ], [ "def prepare_data(sequences, train_size=0.8, test_size=0.1, val_size=0.1):\n \n # Split data\n num_train = int(train_size*len(sequences))\n num_test = int(test_size*len(sequences))\n num_val = int(val_size*len(sequences))\n# print(f'{num_train}, {num_test}, {num_val}')\n \n train_seq = sequences[:num_train]\n test_seq = sequences[num_train:num_train+num_test]\n val_seq = sequences[-num_val:]\n# print(f'{len(train_seq)}, {len(test_seq)}, {len(val_seq)}')\n \n # prepare input & target sequences\n def prepare_sequences(sequences):\n inputs = []\n targets = []\n \n for sequence in sequences:\n inputs.append(sequence[:-1])\n targets.append(sequence[1:])\n \n return inputs, targets\n \n train_inputs, train_targets = prepare_sequences(train_seq)\n test_inputs, test_targets = prepare_sequences(test_seq)\n val_inputs, val_targets = prepare_sequences(val_seq)\n# print(f'{len(train_inputs)}, {len(test_inputs)}, {len(val_inputs)}')\n \n # create datasets\n train_set = Dataset(train_inputs, train_targets)\n test_set = Dataset(test_inputs, test_targets)\n val_set = Dataset(val_inputs, val_targets)\n \n return train_set, test_set, val_set", "_____no_output_____" ], [ "# RNN from scratch", "_____no_output_____" ], [ "def init_orthogonal_weights(dim1, dim2):\n \n # initialize\n weights = np.random.randn(dim1, dim2)\n# print(f'inital random: {weights}')\n if dim1 < dim2:\n weights = weights.T\n \n # QR factorization (Q = orthogonal)\n q, r = np.linalg.qr(weights)\n# print(f'q: {q}')\n \n # Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf\n d = np.diag(r, 0)\n ph = np.sign(d)\n q *= ph\n# print(f'q final: {q}')\n\n if dim1 < dim2:\n q = q.T\n \n return q # q is orthogonal", "_____no_output_____" ], [ "def init_rnn(hidden_size, vocab_size):\n '''\n Initializes RNN\n \n Args:\n hidden_size --> hidden state dimensions\n vocab_size --> input vector dimensions\n\n Returns:\n U --> Weight matrix applied to input, passed to hidden state\n V --> Weight matrix from previous hidden state passed to hidden state\n W --> Weight matrix applied to output from hidden state to give final output\n\n bias_hidden = bias applied in hidden state\n bias_output = bias applied to output\n '''\n \n U = init_orthogonal_weights(hidden_size, vocab_size)\n V = init_orthogonal_weights(hidden_size, hidden_size)\n W = init_orthogonal_weights(vocab_size, hidden_size)\n \n bias_hidden = init_orthogonal_weights(hidden_size, hidden_size)\n bias_output = init_orthogonal_weights(vocab_size, vocab_size)\n \n return (U, V, W, bias_hidden, bias_output)", "_____no_output_____" ], [ "# Activation Functions", "_____no_output_____" ], [ "def sigmoid(x, derivative=False):\n \"\"\"\n Computes sigmoid of array x\n \n Args:\n x --> input array\n derivative --> when set to True will return derivative instead of forward pass\n \"\"\"\n \n x_safe = x + 1e-12\n f = 1 / (1 + np.exp(-x_safe))\n \n if derivative: \n return f * (1 - f)\n else: \n return f", "_____no_output_____" ], [ "def tanh(x, derivative=False):\n \"\"\"\n Computes tanh of array x\n \n Args:\n x --> input array\n derivative --> when set to True will return derivative 
instead of forward pass\n \"\"\"\n \n x_safe = x + 1e-12\n f = (np.exp(x_safe)-np.exp(-x_safe))/(np.exp(x_safe)+np.exp(-x_safe))\n \n if derivative: \n # derivative of tanh is 1 - tanh(x)**2\n return 1 - f**2\n else: \n return f", "_____no_output_____" ], [ "def softmax(x):\n \"\"\"\n Computes softmax of array x\n \n Args:\n x --> input array\n \"\"\"\n return np.exp(x+1e-12) / np.sum(np.exp(x+1e-12))", "_____no_output_____" ], [ "# Forward Pass", "_____no_output_____" ], [ "def forward_pass(inputs, hidden_state, parameters):\n \n U, V, W, bias_hidden, bias_output = parameters\n outputs, hidden_states = [], [hidden_state]\n \n for i in range(len(inputs)):\n \n hidden_state = tanh(np.dot(U, inputs[i]) + np.dot(V, hidden_states[-1]) + bias_hidden)\n output = np.dot(W, hidden_state) + bias_output\n \n hidden_states.append(hidden_state)\n outputs.append(output)\n \n return outputs, hidden_states", "_____no_output_____" ], [ "def clip_gradient_norm(grads, max_norm=0.25):\n \"\"\"\n Prevents exploding gradient by clipping \n Clips gradients to have max norm of max_norm\n \"\"\"\n\n max_norm = float(max_norm)\n total_norm = 0\n\n # Using L2 norm squared\n for grad in grads:\n grad_norm = np.sum(np.power(grad, 2))\n total_norm += grad_norm\n\n total_norm = np.sqrt(total_norm)\n\n clip_coef = max_norm / (total_norm + 1e-6)\n\n if clip_coef < 1:\n for grad in grads:\n grad *= clip_coef\n\n return grads", "_____no_output_____" ], [ "def cross_entropy_loss(output, target):\n loss = 0\n for j in range(len(output)):\n loss += target[j] * np.log(output[j] + 1e-9) \n \n return -loss ", "_____no_output_____" ], [ "def backward_pass(inputs, outputs, hidden_states, targets, params):\n U, V, W, bias_hidden, bias_output = params\n \n # Initialize gradients as zero\n d_U, d_V, d_W = np.zeros_like(U), np.zeros_like(V), np.zeros_like(W)\n d_bias_hidden, d_bias_output = np.zeros_like(bias_hidden), np.zeros_like(bias_output)\n \n d_hidden_next = np.zeros_like(hidden_states[0])\n loss = 0\n \n # Iterate backwards through elements\n for i in reversed(range(len(outputs))):\n \n # Calculate loss on the softmax probabilities\n probs = softmax(outputs[i])\n loss += (cross_entropy_loss(probs, targets[i])/len(targets))\n \n # Backpropagate into output: gradient of cross entropy w.r.t. the logits is probs - one_hot(target)\n d_output = probs.copy()\n d_output[np.argmax(targets[i])] -= 1\n \n # Backpropagate into W (hidden_states[i+1] is the state that produced outputs[i])\n d_W += np.dot(d_output, hidden_states[i+1].T)\n d_bias_output += d_output\n \n # Backpropagate into h\n d_h = np.dot(W.T, d_output) + d_hidden_next\n \n # Backpropagate through non-linearity (tanh)\n d_f = (1 - hidden_states[i+1]**2) * d_h\n d_bias_hidden += d_f\n \n # Backpropagate into U\n d_U += np.dot(d_f, inputs[i].T)\n \n # Backpropagate into V (hidden_states[i] is the previous hidden state)\n d_V += np.dot(d_f, hidden_states[i].T)\n d_hidden_next = np.dot(V.T, d_f)\n \n # Clip gradients\n grads = d_U, d_V, d_W, d_bias_hidden, d_bias_output\n grads = clip_gradient_norm(grads)\n \n return loss, grads\n \n ", "_____no_output_____" ], [ "def optimizer(parameters, gradients, learning_rate=1e-3):\n for parameter, gradient in zip(parameters, gradients):\n parameter -= learning_rate * gradient\n \n return parameters", "_____no_output_____" ], [ "def encode_data(dataset, vocab_size, word_to_idx):\n \n x, y = [], []\n for inputs, targets in dataset:\n x.append(one_hot_encode_sequence(inputs, vocab_size, word_to_idx))\n y.append(one_hot_encode_sequence(targets, vocab_size, word_to_idx))\n \n return (x, y)", "_____no_output_____" ], [ "def train(training_set, hidden_state, parameters, epochs=1000):\n \n training_loss = []\n inputs, targets = training_set\n for i in range(epochs):\n \n epoch_training_loss = 0\n for x, y in zip(inputs, targets):\n hidden_state = np.zeros_like(hidden_state)\n\n # Forward pass\n outputs, hidden_states = forward_pass(x, hidden_state, parameters)\n\n # Backward pass\n loss, gradients = backward_pass(x, outputs, hidden_states, y, parameters)\n if np.isnan(loss):\n raise ValueError('ERROR: Gradients have vanished')\n\n # Update parameters (optimizer)\n parameters = optimizer(parameters, gradients)\n epoch_training_loss += loss\n \n # average over the number of sequences, not the (inputs, targets) tuple\n training_loss.append(epoch_training_loss/len(inputs))\n \n if i%100 == 0:\n print(f'Epoch {i}, training loss: {training_loss[-1]}')\n \n return parameters, training_loss", "_____no_output_____" ], [ "def validate(val_set, hidden_state, parameters, epochs=100):\n \n validation_loss = []\n inputs, targets = val_set\n for i in range(epochs):\n epoch_validation_loss = 0\n for x, y in zip(inputs, targets):\n hidden_state = np.zeros_like(hidden_state)\n \n # Forward pass\n outputs, hidden_states = forward_pass(x, hidden_state, parameters)\n \n # Backward pass (loss only; gradients are discarded)\n loss, _ = backward_pass(x, outputs, hidden_states, y, parameters)\n if np.isnan(loss):\n raise ValueError('ERROR: Gradients have vanished')\n \n # accumulate the loss for this epoch\n epoch_validation_loss += loss\n \n validation_loss.append(epoch_validation_loss/len(inputs))\n \n if i%100 == 0:\n print(f'Epoch {i}, validation loss: {validation_loss[-1]}')\n \n return validation_loss", "_____no_output_____" ], [ "def test(test_set, hidden_state, parameters, idx_to_word):\n inputs, targets = test_set\n results = defaultdict()\n for x in inputs:\n hidden_state = np.zeros_like(hidden_state)\n outputs, hidden_states = forward_pass(x, hidden_state, parameters)\n x_decoded = [idx_to_word[np.argmax(x[i])] for i in range(len(x))]\n y_decoded = [idx_to_word[np.argmax(output)] for output in outputs]\n x_decoded = ('').join(x_decoded)\n y_decoded = ('').join(y_decoded)\n results[x_decoded] = y_decoded\n return results\n ", "_____no_output_____" ], [ "def rnn():\n \n # Constants\n epochs = 100\n hidden_size = 50\n hidden_state = np.zeros((hidden_size, 1))\n \n # Data Preparation\n sequences = generate_dataset()\n word_to_idx, idx_to_word, vocab_size = word_encoding(sequences)\n train_set, test_set, val_set = prepare_data(sequences)\n \n # Data encoding\n train_set = encode_data(train_set, vocab_size, word_to_idx)\n test_set = encode_data(test_set, vocab_size, word_to_idx)\n val_set = encode_data(val_set, vocab_size, word_to_idx)\n \n # Initialize rnn\n parameters = init_rnn(hidden_size, vocab_size)\n training_loss, validation_loss = [], []\n \n # Train\n parameters, training_loss = train(train_set, hidden_state, parameters, epochs)\n \n # Validate\n validation_loss = validate(val_set, hidden_state, parameters, epochs)\n \n # Test\n results = test(test_set, hidden_state, parameters, idx_to_word)\n \n # Print results\n for key in results:\n print(f'Input: {key}, Output: {results[key]}')", "_____no_output_____" ], [ "# rnn()", "_____no_output_____" ], [ "# LSTM",
"_____no_output_____" ], [ "def init_lstm(hidden_size, vocab_size):\n \n z_size = hidden_size + vocab_size\n \n # Forget gate\n W_forget = np.zeros((hidden_size, z_size))\n b_forget = np.zeros((hidden_size, 1))\n \n # Update gate\n W_update = np.zeros((hidden_size, z_size))\n b_update = np.zeros((hidden_size, 1))\n \n # Output gate\n W_output = np.zeros((hidden_size, z_size))\n b_output = np.zeros((hidden_size, 1))\n \n # Candidate\n W_g = np.zeros((hidden_size, z_size))\n b_g = np.zeros((hidden_size, 1))\n \n # Output: output = W_v * h(t) + b_v\n W_v = np.zeros((vocab_size, hidden_size))\n b_v = np.zeros((vocab_size, 1))\n \n # Initialize weights\n W_forget = init_orthogonal_weights(W_forget.shape[0], W_forget.shape[1])\n W_update = init_orthogonal_weights(W_update.shape[0], W_update.shape[1])\n W_output = init_orthogonal_weights(W_output.shape[0], W_output.shape[1])\n W_g = init_orthogonal_weights(W_g.shape[0], W_g.shape[1])\n W_v = init_orthogonal_weights(W_v.shape[0], W_v.shape[1])\n \n return W_forget, W_update, W_output, W_g, W_v, b_forget, b_update, b_output, b_g, b_v\n\n ", "_____no_output_____" ], [ "def forward_lstm(inputs, prev_hidden, prev_cell, parameters, activation='softmax'):\n \n # Unpack parameters\n W_forget, W_update, W_output, W_g, W_v, b_forget, b_update, b_output, b_g, b_v = parameters\n \n # Lists for computations to be saved\n inputs_list = []\n forget_gate, update_gate, output_gate = [], [], []\n g_comp, v_comp = [], []\n hidden_state, cell_state = [], []\n outputs_list = []\n \n # Hidden and cell states\n hidden_state.append(prev_hidden)\n cell_state.append(prev_cell)\n \n # Parse through input\n for x in inputs:\n \n # Concatenate input\n z = np.row_stack((prev_hidden, x))\n inputs_list.append(z)\n \n # Forget gate\n f = sigmoid((np.dot(W_forget, z)) + b_forget)\n forget_gate.append(f)\n \n # Update gate (Input gate)\n u = sigmoid((np.dot(W_update, z)) + b_update)\n update_gate.append(u)\n \n # Candidate (g)\n g = tanh(np.dot(W_g, z) + b_g)\n g_comp.append(g)\n \n # Memory state (Cell state)\n c = prev_cell * f + g * u\n cell_state.append(c)\n \n # Output gate\n o = sigmoid((np.dot(W_output, z)) + b_output)\n output_gate.append(o)\n \n # Hidden state\n h = o * tanh(c)\n hidden_state.append(h)\n \n # Calculate Logits (Intermediate step)\n v = np.dot(W_v, prev_hidden) + b_v\n v_comp.append(v)\n \n # Calculate final output\n if activation == 'softmax':\n output = softmax(v)\n outputs_list.append(output)\n \n elif activation == 'linear':\n outputs_list.append(v)\n \n return inputs_list, forget_gate, update_gate, g_comp, cell_state, output_gate, hidden_state, v_comp, outputs_list\n\n ", "_____no_output_____" ], [ "def backward_lstm(computation_lists, targets, parameters):\n \n # Unpack inputs\n inputs_list, forget_gate, update_gate, g_comp, cell_state, output_gate, hidden_state, v_comp, outputs_list = computation_lists\n W_forget, W_update, W_output, W_g, W_v, b_forget, b_update, b_output, b_g, b_v = parameters\n \n # Initialize gradients (as zero) & other variables\n W_f_d = np.zeros_like(W_forget)\n b_f_d = np.zeros_like(b_forget)\n\n W_u_d = np.zeros_like(W_update)\n b_u_d = np.zeros_like(b_update)\n\n W_g_d = np.zeros_like(W_g)\n b_g_d = np.zeros_like(b_g)\n\n W_o_d = np.zeros_like(W_output)\n b_o_d = np.zeros_like(b_output)\n\n W_v_d = np.zeros_like(W_v)\n b_v_d = np.zeros_like(b_v)\n \n d_hidden_prev = np.zeros_like(hidden_state[0])\n d_cell_prev = np.zeros_like(cell_state[0])\n hidden_size = len(hidden_state)\n \n loss = 0\n \n for i in 
reversed(range(len(outputs_list))):\n \n # Cross entropy\n loss += (cross_entropy_loss(outputs_list[i], targets[i])/len(targets))\n \n # Previous cell state\n prev_cell = cell_state[-1]\n \n # Derivative for v (relation of hidden state to output)\n dv = np.copy(outputs_list[i])\n dv[np.argmax(targets[i])] -= 1\n W_v_d += np.dot(dv, hidden_state[i].T)\n b_v_d += dv\n \n # Derivative for hidden state (h)\n dh = np.dot(W_v.T, dv) \n dh += d_hidden_prev\n \n # Derivative for output (o)\n do = dh * tanh(cell_state[i])\n do = sigmoid(output_gate[i], derivative=True)*do\n W_o_d += np.dot(do, inputs_list[i].T)\n b_o_d += do\n \n # Derivative for cell state (c)\n dC = np.copy(d_cell_prev)\n dC += dh * output_gate[i] * tanh(tanh(cell_state[i]), derivative=True)\n \n # Derivative for candidate (g)\n dg = dC * update_gate[i]\n dg = tanh(g_comp[i], derivative=True) * dg\n W_g_d += np.dot(dg, inputs_list[i].T)\n b_g_d += dg\n \n # Derivative for update gate (input gate)\n du = dC * g_comp[i]\n du = sigmoid(update_gate[i], True) * du\n W_u_d += np.dot(du, inputs_list[i].T)\n b_u_d += du\n \n # Derivative for forget gate (f)\n df = dC * prev_cell\n df = sigmoid(forget_gate[i]) * df\n W_f_d += np.dot(df, inputs_list[i].T)\n b_f_d += df\n \n # Update derivatives of prev cell and hidden states\n dz = (np.dot(W_forget.T, df)\n + np.dot(W_update.T, du)\n + np.dot(W_g.T, dg)\n + np.dot(W_output.T, do))\n d_hidden_prev = dz[:hidden_size, :]\n d_cell_prev = forget_gate[i] * dC\n \n # Clip gradients\n gradients = W_f_d, W_u_d, W_g_d, W_o_d, W_v_d, b_f_d, b_u_d, b_g_d, b_o_d, b_v_d\n gradients = clip_gradient_norm(gradients)\n \n return loss, gradients", "_____no_output_____" ], [ "def train_lstm(train_set, parameters, hidden_size, epochs, activation='softmax'):\n \n training_loss = []\n inputs, targets = train_set\n for i in range(epochs):\n epoch_training_loss = 0\n for x, y in zip(inputs, targets):\n hidden_state = np.zeros((hidden_size, 1))\n cell_state = np.zeros((hidden_size, 1))\n \n # Forward pass\n computation_lists = forward_lstm(x, hidden_state, cell_state, parameters, activation)\n \n # Backward pass\n loss, gradients = backward_lstm(computation_lists, y, parameters)\n \n # Update parameters (optimizer)\n parameters = optimizer(parameters, gradients)\n \n # Update loss\n epoch_training_loss += loss\n \n training_loss.append(epoch_training_loss)\n \n return training_loss, parameters\n \n \n ", "_____no_output_____" ], [ "def validate_lstm(val_set, parameters, hidden_size, epochs, activation='softmax'):\n validation_loss = []\n inputs, targets = val_set\n for i in range(epochs):\n epoch_validation_loss = 0\n for x, y in zip(inputs, targets):\n hidden_state = np.zeros((hidden_size, 1))\n cell_state = np.zeros((hidden_size, 1))\n \n # Forward pass\n computation_lists = forward_lstm(x, hidden_state, cell_state, parameters, activation)\n \n # Backward pass\n loss, gradients = backward_lstm(computation_lists, y, parameters)\n \n # Update loss\n epoch_validation_loss += loss\n \n validation_loss.append(epoch_validation_loss)\n \n return validation_loss", "_____no_output_____" ], [ "def test_lstm(test_set, parameters, hidden_size, ind_to_word=None, activation='softmax'):\n inputs, targets = test_set\n results1 = defaultdict()\n results2 = []\n for x in inputs:\n hidden_state = np.zeros((hidden_size, 1))\n cell_state = np.zeros((hidden_size, 1))\n \n computation_lists = forward_lstm(x, hidden_state, cell_state, parameters, activation)\n inputs_list, forget_gate, update_gate, g_comp, cell_state, output_gate, 
hidden_state, v_comp, outputs = computation_lists\n\n if ind_to_word:\n x_decoded = [ind_to_word[np.argmax(x[i])] for i in range(len(x))]\n y_decoded = [ind_to_word[np.argmax(output)] for output in outputs]\n x_decoded = ('').join(x_decoded)\n y_decoded = ('').join(y_decoded)\n results1[x_decoded] = y_decoded\n else:\n results2.append(outputs)\n \n if ind_to_word:\n return results1\n else: \n return results2", "_____no_output_____" ], [ "def lstm():\n \n # Data Preparation\n sequences = generate_dataset()\n word_to_idx, idx_to_word, vocab_size = word_encoding(sequences)\n train_set, test_set, val_set = prepare_data(sequences)\n print(vocab_size)\n \n # Data encoding\n train_set = encode_data(train_set, vocab_size, word_to_idx)\n test_set = encode_data(test_set, vocab_size, word_to_idx)\n val_set = encode_data(val_set, vocab_size, word_to_idx)\n \n # Initialize network\n epochs = 20\n hidden_size = 50\n z_size = hidden_size + vocab_size\n parameters = init_lstm(hidden_size, vocab_size)\n \n # Train\n training_loss, parameters = train_lstm(train_set, parameters, hidden_size, epochs, 'softmax')\n \n # Validate\n validation_loss = validate_lstm(val_set, parameters, hidden_size, epochs)\n \n # Test\n results = test_lstm(test_set, parameters, hidden_size, idx_to_word)\n \n # Print results\n for key in results:\n print(f'Input: {key}, Output: {results[key]}')\n\n return train_set", "_____no_output_____" ], [ "# lstm()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
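A note on the LSTM notebook above: the standard way to validate a hand-written backward pass like `backward_lstm` is a finite-difference gradient check. The sketch below is an editorial illustration, not part of the original repository; it assumes the notebook's helpers (`sigmoid`/`tanh` with a `derivative` flag, `softmax`, `cross_entropy_loss`, `clip_gradient_norm`, `init_orthogonal_weights`) are in scope, and it also assumes `clip_gradient_norm` uses a threshold large enough not to rescale the analytic gradients during the comparison.

```python
import numpy as np

def gradient_check(x, y, parameters, hidden_size, eps=1e-6, n_checks=5):
    """Compare backward_lstm's analytic gradient for W_forget against a
    central-difference estimate. x is a list of one-hot column vectors,
    y the matching list of one-hot targets."""
    h0 = np.zeros((hidden_size, 1))
    c0 = np.zeros((hidden_size, 1))

    # Analytic gradient (index 0 of the gradient tuple is dW_forget)
    caches = forward_lstm(x, h0, c0, parameters)
    _, grads = backward_lstm(caches, y, parameters)
    W_forget, dW_forget = parameters[0], grads[0]

    rng = np.random.default_rng(0)
    for _ in range(n_checks):
        i = int(rng.integers(W_forget.shape[0]))
        j = int(rng.integers(W_forget.shape[1]))

        # Nudge one weight up and down and re-measure the loss
        W_forget[i, j] += eps
        loss_plus, _ = backward_lstm(forward_lstm(x, h0, c0, parameters), y, parameters)
        W_forget[i, j] -= 2 * eps
        loss_minus, _ = backward_lstm(forward_lstm(x, h0, c0, parameters), y, parameters)
        W_forget[i, j] += eps  # restore the original weight

        numerical = (loss_plus - loss_minus) / (2 * eps)
        print(f'W_forget[{i},{j}]: analytic {dW_forget[i, j]:+.8f}  '
              f'numerical {numerical:+.8f}')
```

Called on a single encoded training pair, e.g. `gradient_check(train_set[0][0], train_set[1][0], init_lstm(50, vocab_size), 50)`, the two columns should agree to several decimal places.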
4a7cae56ef0e342423aa37136f01b3fdd348d05c
105,508
ipynb
Jupyter Notebook
DS_Unit_1_Sprint_Challenge_1.ipynb
macscheffer/DS-Sprint-01-Dealing-With-Data
31e7166b49819a640c45264f84f7ca6b0cba1851
[ "MIT" ]
null
null
null
DS_Unit_1_Sprint_Challenge_1.ipynb
macscheffer/DS-Sprint-01-Dealing-With-Data
31e7166b49819a640c45264f84f7ca6b0cba1851
[ "MIT" ]
null
null
null
DS_Unit_1_Sprint_Challenge_1.ipynb
macscheffer/DS-Sprint-01-Dealing-With-Data
31e7166b49819a640c45264f84f7ca6b0cba1851
[ "MIT" ]
null
null
null
110.827731
18,958
0.769932
[ [ [ "<a href=\"https://colab.research.google.com/github/macscheffer/DS-Sprint-01-Dealing-With-Data/blob/master/DS_Unit_1_Sprint_Challenge_1.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Data Science Unit 1 Sprint Challenge 1\n\n## Loading, cleaning, visualizing, and analyzing data\n\nIn this sprint challenge you will look at a dataset of the survival of patients who underwent surgery for breast cancer.\n\nhttp://archive.ics.uci.edu/ml/datasets/Haberman%27s+Survival\n\nData Set Information:\nThe dataset contains cases from a study that was conducted between 1958 and 1970 at the University of Chicago's Billings Hospital on the survival of patients who had undergone surgery for breast cancer.\n\nAttribute Information:\n1. Age of patient at time of operation (numerical)\n2. Patient's year of operation (year - 1900, numerical)\n3. Number of positive axillary nodes detected (numerical)\n4. Survival status (class attribute)\n-- 1 = the patient survived 5 years or longer\n-- 2 = the patient died within 5 years\n\nSprint challenges are evaluated based on satisfactory completion of each part. It is suggested you work through it in order, getting each aspect reasonably working, before trying to deeply explore, iterate, or refine any given step. Once you get to the end, if you want to go back and improve things, go for it!", "_____no_output_____" ], [ "## Part 1 - Load and validate the data\n\n- Load the data as a `pandas` data frame.\n- Validate that it has the appropriate number of observations (you can check the raw file, and also read the dataset description from UCI).\n- Validate that you have no missing values.\n- Add informative names to the features.\n- The survival variable is encoded as 1 for surviving >5 years and 2 for not - change this to be 0 for not surviving and 1 for surviving >5 years (0/1 is a more traditional encoding of binary variables)\n\nAt the end, print the first five rows of the dataset to demonstrate the above.", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ncols = [\n    'age', # age of the patient at the time of the operation\n    'operation_year', # year that the operation took place\n    'positive_axillary_nodes', # number of positive axillary nodes that were detected\n    'survival' # survival status: 1 == the patient survived for >= 5 years after the operation, 2 == the patient died within 5 years of the operation\n]\n\ndf = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/haberman/haberman.data', names=cols)\n\ndf.shape", "_____no_output_____" ], [ "# the first sum is a method on each series in the df; the second sum adds together the totals across the series.\ndf.isna().sum().sum()", "_____no_output_____" ], [ "# replacing 2s in the survival column with 0s, meaning the patient passed away within 5 years of the surgery.\ndf.survival = df.survival.replace(to_replace=2, value=0)\ndf.head()", "_____no_output_____" ] ], [ [ "## Part 2 - Examine the distribution and relationships of the features\n\nExplore the data - create at least *2* tables (can be summary statistics or crosstabulations) and *2* plots illustrating the nature of the data.\n\nThis is open-ended, so to remind - first *complete* this task as a baseline, then go on to the remaining sections, and *then* as time allows revisit and explore further.\n\nHint - you may need to bin some variables depending on your chosen tables/plots.", "_____no_output_____" ] ], [ [ "# 73.53 % survival rate\ndf.describe()", "_____no_output_____" ], [ "# creating age-bucket and operation-year-bucket columns\n\nage_bins = pd.cut(df.age, bins=6)\noperation_year_bins = pd.cut(df.operation_year, bins=6)\n\ndf['age_bins'] = age_bins\ndf['operation_year_bins'] = operation_year_bins\n\n# showing, for each age bin, the proportion of its patients operated on in each year bin;\n# for example, a top-right value of roughly 0.059 means that only about 5% of our youngest patients had their operations between 1968 and 1969.\npd.crosstab(df.age_bins, df.operation_year_bins, normalize='index')", "_____no_output_____" ], [ "# in general, as age goes up, survival rates go down.\n\ndf.pivot_table(values='survival', index='age_bins')", "_____no_output_____" ], [ "# the same table as a graph\ndf.pivot_table(values='survival', index='age_bins').plot.bar()", "_____no_output_____" ], [ "# as the year of operation goes up, the trend in survival is volatile, but upward\ndf.pivot_table(values='survival', index='operation_year_bins').plot.bar()", "_____no_output_____" ], [ "# messy at lower x values, but we can generally see a slight negative correlation between age and positive_axillary_nodes\nplt.scatter(x=df.positive_axillary_nodes[df.positive_axillary_nodes > 3], y=df.age[df.positive_axillary_nodes > 3])\nplt.xlabel('Positive Axillary Nodes')", "_____no_output_____" ] ], [ [ "## Part 3 - Analysis and Interpretation\n\nNow that you've looked at the data, answer the following questions:\n\n- What is at least one feature that looks to have a positive relationship with survival?\n- What is at least one feature that looks to have a negative relationship with survival?\n- How are those two features related with each other, and what might that mean?\n\nAnswer with text, but feel free to intersperse example code/results or refer to it from earlier.", "_____no_output_____" ], [ "One feature that looks to have a positive relationship with survival is operation year, while both age and positive_axillary_nodes seem to have a negative correlation with survival.\n\nIt's important to note that age and operation year are correlated, meaning that as the operation year goes up, the patients tend to be older. This may cause the overall percentage of survivors year over year to look discouraging, i.e., not rising as fast as researchers (or people in general) would hope.\n\nAnother interesting note, one that would need more digging before drawing a conclusion, is that age and positive axillary nodes are negatively correlated. Since positive axillary nodes have the strongest negative (and absolute) correlation with survival, this may tell us something about our definition of survival. It would need to be thought about in the scope of the question we are asking.", "_____no_output_____" ] ], [ [ "df.corr()", "_____no_output_____" ], [ "print('The correlation between age and operation year:', round(df.age.corr(df.operation_year), 4))\ndf.pivot_table(values='age', index='operation_year').plot.bar()\nplt.title('Average Age in Each Operation Year')\nplt.ylabel('Age')\nplt.show()", "The correlation between age and operation year: 0.0895\n" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
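As a follow-up to the node-correlation observation in that notebook's Part 3, a quick table of survival rate by axillary-node count makes the relationship concrete. This is a minimal sketch assuming the `df` built above; the bin edges and labels are arbitrary choices for illustration.

```python
import pandas as pd

# Bin the node counts (edges chosen for illustration) and compare
# the mean survival rate and patient count in each bin.
node_bins = pd.cut(df.positive_axillary_nodes,
                   bins=[-1, 0, 3, 10, 60],
                   labels=['0 nodes', '1-3 nodes', '4-10 nodes', '11+ nodes'])
print(df.groupby(node_bins).survival.agg(['mean', 'count']))
```

Because `survival` is 0/1 after the recoding, the `mean` column reads directly as the survival rate within each bin.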
4a7cc44c9d72bd060c5bd2fc4ebca24772f8ab6b
119,307
ipynb
Jupyter Notebook
notebooks/Chapter 1 - Mining Twitter.ipynb
ohshane71/Mining-the-Social-Web-3rd-Edition
ba2c56ecd28e098f097035e11f417534844d71e1
[ "BSD-2-Clause" ]
null
null
null
notebooks/Chapter 1 - Mining Twitter.ipynb
ohshane71/Mining-the-Social-Web-3rd-Edition
ba2c56ecd28e098f097035e11f417534844d71e1
[ "BSD-2-Clause" ]
null
null
null
notebooks/Chapter 1 - Mining Twitter.ipynb
ohshane71/Mining-the-Social-Web-3rd-Edition
ba2c56ecd28e098f097035e11f417534844d71e1
[ "BSD-2-Clause" ]
null
null
null
81.717123
11,850
0.736135
[ [ [ "# Mining Twitter\n\nTwitter implements OAuth 1.0A as its standard authentication mechanism, and in order to use it to make requests to Twitter's API, you'll need to go to https://developer.twitter.com/en/apps and create a sample application. It is possible that Twitter no longer supports sandboxed applications and you may need to submit a request for permission to develop an app on Twitter.\n\nThere are four primary identifiers you'll need to note for an OAuth 1.0A workflow: consumer key, consumer secret, access token, and access token secret. Note that you will need an ordinary Twitter account in order to log in, create an app, and get these credentials.\n\n<img src=\"resources/ch01-twitter/images/Twitter-AppCredentials.png\" width=\"600px\">", "_____no_output_____" ], [ "If you are running this code on Binder or from the Docker container, you should just be able to execute the code in this notebook without any worries whatsoever about installing dependencies. If you are running the code from your own development environment, however, be advised that the examples in this chapter take advantage of a Python package called [twitter](https://github.com/sixohsix/twitter) to make API calls. You can install this package in a terminal with [pip](https://pypi.python.org/pypi/pip) with the command `pip install twitter`, preferably from within a [Python virtual environment](https://pypi.python.org/pypi/virtualenv).", "_____no_output_____" ], [ "Once installed, you should be able to open up a Python interpreter (or better yet, your [IPython](http://ipython.org/) interpreter) and get rolling.", "_____no_output_____" ], [ "## Authorizing an application to access Twitter account data", "_____no_output_____" ] ], [ [ "import twitter\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\n# Go to https://developer.twitter.com/en/apps to create an app and get values\n# for these credentials, which you'll need to supply via the environment\n# variables read below (for example, from a .env file loaded by python-dotenv).\n# See https://developer.twitter.com/en/docs/basics/authentication/overview/oauth\n# for more information on Twitter's OAuth implementation.\n\nCONSUMER_KEY = os.getenv(\"CONSUMER_KEY\")\nCONSUMER_SECRET = os.getenv(\"CONSUMER_SECRET\")\nOAUTH_TOKEN = os.getenv(\"ACCESS_TOKEN\")\nOAUTH_TOKEN_SECRET = os.getenv(\"ACCESS_TOKEN_SECRET\")\n\nauth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET,\n                           CONSUMER_KEY, CONSUMER_SECRET)\n\ntwitter_api = twitter.Twitter(auth=auth)\n\n# Nothing to see by displaying twitter_api except that it's now a\n# defined variable\n\nprint(twitter_api)", "<twitter.api.Twitter object at 0x7f481c49eaf0>\n" ] ], [ [ "## Retrieving trends", "_____no_output_____" ] ], [ [ "# The Yahoo! 
Where On Earth ID for the entire world is 1.\n# See https://dev.twitter.com/docs/api/1.1/get/trends/place and\n# http://developer.yahoo.com/geo/geoplanet/\n\nWORLD_WOE_ID = 1\nUS_WOE_ID = 23424977\n\n# Prefix ID with the underscore for query string parameterization.\n# Without the underscore, the twitter package appends the ID value\n# to the URL itself as a special case keyword argument.\n\nworld_trends = twitter_api.trends.place(_id=WORLD_WOE_ID)\nus_trends = twitter_api.trends.place(_id=US_WOE_ID)\n\nprint(world_trends)\nprint()\nprint(us_trends)", "[{'trends': [{'name': 'jade picon', 'url': 'http://twitter.com/search?q=%22jade+picon%22', 'promoted_content': None, 'query': '%22jade+picon%22', 'tweet_volume': 32325}, {'name': '#velatobkk', 'url': 'http://twitter.com/search?q=%23velatobkk', 'promoted_content': None, 'query': '%23velatobkk', 'tweet_volume': 78696}, {'name': 'Neymar', 'url': 'http://twitter.com/search?q=Neymar', 'promoted_content': None, 'query': 'Neymar', 'tweet_volume': 85931}, {'name': '#TENLEE_10MPaintMeNaked', 'url': 'http://twitter.com/search?q=%23TENLEE_10MPaintMeNaked', 'promoted_content': None, 'query': '%23TENLEE_10MPaintMeNaked', 'tweet_volume': 56629}, {'name': '#ZETAWIN2021', 'url': 'http://twitter.com/search?q=%23ZETAWIN2021', 'promoted_content': None, 'query': '%23ZETAWIN2021', 'tweet_volume': None}, {'name': 'João Guilherme', 'url': 'http://twitter.com/search?q=%22Jo%C3%A3o+Guilherme%22', 'promoted_content': None, 'query': '%22Jo%C3%A3o+Guilherme%22', 'tweet_volume': 26691}, {'name': 'TwiteraaGt Açıldı', 'url': 'http://twitter.com/search?q=%22TwiteraaGt+A%C3%A7%C4%B1ld%C4%B1%22', 'promoted_content': None, 'query': '%22TwiteraaGt+A%C3%A7%C4%B1ld%C4%B1%22', 'tweet_volume': None}, {'name': 'TEN OUT OF TEN', 'url': 'http://twitter.com/search?q=%22TEN+OUT+OF+TEN%22', 'promoted_content': None, 'query': '%22TEN+OUT+OF+TEN%22', 'tweet_volume': 48408}, {'name': '#ウマ娘で性格診断', 'url': 'http://twitter.com/search?q=%23%E3%82%A6%E3%83%9E%E5%A8%98%E3%81%A7%E6%80%A7%E6%A0%BC%E8%A8%BA%E6%96%AD', 'promoted_content': None, 'query': '%23%E3%82%A6%E3%83%9E%E5%A8%98%E3%81%A7%E6%80%A7%E6%A0%BC%E8%A8%BA%E6%96%AD', 'tweet_volume': None}, {'name': 'ポケカの拡張パック', 'url': 'http://twitter.com/search?q=%E3%83%9D%E3%82%B1%E3%82%AB%E3%81%AE%E6%8B%A1%E5%BC%B5%E3%83%91%E3%83%83%E3%82%AF', 'promoted_content': None, 'query': '%E3%83%9D%E3%82%B1%E3%82%AB%E3%81%AE%E6%8B%A1%E5%BC%B5%E3%83%91%E3%83%83%E3%82%AF', 'tweet_volume': None}, {'name': 'ウマ娘のキャラ', 'url': 'http://twitter.com/search?q=%E3%82%A6%E3%83%9E%E5%A8%98%E3%81%AE%E3%82%AD%E3%83%A3%E3%83%A9', 'promoted_content': None, 'query': '%E3%82%A6%E3%83%9E%E5%A8%98%E3%81%AE%E3%82%AD%E3%83%A3%E3%83%A9', 'tweet_volume': None}, {'name': '大塚明夫さん', 'url': 'http://twitter.com/search?q=%E5%A4%A7%E5%A1%9A%E6%98%8E%E5%A4%AB%E3%81%95%E3%82%93', 'promoted_content': None, 'query': '%E5%A4%A7%E5%A1%9A%E6%98%8E%E5%A4%AB%E3%81%95%E3%82%93', 'tweet_volume': 38044}, {'name': '大塚さん', 'url': 'http://twitter.com/search?q=%E5%A4%A7%E5%A1%9A%E3%81%95%E3%82%93', 'promoted_content': None, 'query': '%E5%A4%A7%E5%A1%9A%E3%81%95%E3%82%93', 'tweet_volume': 54552}, {'name': '#AnonymousBrof', 'url': 'http://twitter.com/search?q=%23AnonymousBrof', 'promoted_content': None, 'query': '%23AnonymousBrof', 'tweet_volume': 11601}, {'name': '古戦場欠席', 'url': 'http://twitter.com/search?q=%E5%8F%A4%E6%88%A6%E5%A0%B4%E6%AC%A0%E5%B8%AD', 'promoted_content': None, 'query': '%E5%8F%A4%E6%88%A6%E5%A0%B4%E6%AC%A0%E5%B8%AD', 'tweet_volume': None}, {'name': '野外音楽イベントの客', 'url': 
'http://twitter.com/search?q=%E9%87%8E%E5%A4%96%E9%9F%B3%E6%A5%BD%E3%82%A4%E3%83%99%E3%83%B3%E3%83%88%E3%81%AE%E5%AE%A2', 'promoted_content': None, 'query': '%E9%87%8E%E5%A4%96%E9%9F%B3%E6%A5%BD%E3%82%A4%E3%83%99%E3%83%B3%E3%83%88%E3%81%AE%E5%AE%A2', 'tweet_volume': None}, {'name': '酒類提供', 'url': 'http://twitter.com/search?q=%E9%85%92%E9%A1%9E%E6%8F%90%E4%BE%9B', 'promoted_content': None, 'query': '%E9%85%92%E9%A1%9E%E6%8F%90%E4%BE%9B', 'tweet_volume': 14742}, {'name': '全国最多感染', 'url': 'http://twitter.com/search?q=%E5%85%A8%E5%9B%BD%E6%9C%80%E5%A4%9A%E6%84%9F%E6%9F%93', 'promoted_content': None, 'query': '%E5%85%A8%E5%9B%BD%E6%9C%80%E5%A4%9A%E6%84%9F%E6%9F%93', 'tweet_volume': None}, {'name': '小林さん', 'url': 'http://twitter.com/search?q=%E5%B0%8F%E6%9E%97%E3%81%95%E3%82%93', 'promoted_content': None, 'query': '%E5%B0%8F%E6%9E%97%E3%81%95%E3%82%93', 'tweet_volume': 70669}, {'name': 'angie', 'url': 'http://twitter.com/search?q=angie', 'promoted_content': None, 'query': 'angie', 'tweet_volume': 43974}, {'name': '椎名林檎', 'url': 'http://twitter.com/search?q=%E6%A4%8E%E5%90%8D%E6%9E%97%E6%AA%8E', 'promoted_content': None, 'query': '%E6%A4%8E%E5%90%8D%E6%9E%97%E6%AA%8E', 'tweet_volume': None}, {'name': 'Brasília', 'url': 'http://twitter.com/search?q=Bras%C3%ADlia', 'promoted_content': None, 'query': 'Bras%C3%ADlia', 'tweet_volume': 254244}, {'name': 'ルパン三世', 'url': 'http://twitter.com/search?q=%E3%83%AB%E3%83%91%E3%83%B3%E4%B8%89%E4%B8%96', 'promoted_content': None, 'query': '%E3%83%AB%E3%83%91%E3%83%B3%E4%B8%89%E4%B8%96', 'tweet_volume': 151491}, {'name': 'Xbox 360', 'url': 'http://twitter.com/search?q=%22Xbox+360%22', 'promoted_content': None, 'query': '%22Xbox+360%22', 'tweet_volume': 21327}, {'name': 'まがりなり', 'url': 'http://twitter.com/search?q=%E3%81%BE%E3%81%8C%E3%82%8A%E3%81%AA%E3%82%8A', 'promoted_content': None, 'query': '%E3%81%BE%E3%81%8C%E3%82%8A%E3%81%AA%E3%82%8A', 'tweet_volume': None}, {'name': 'CBCテレビ', 'url': 'http://twitter.com/search?q=CBC%E3%83%86%E3%83%AC%E3%83%93', 'promoted_content': None, 'query': 'CBC%E3%83%86%E3%83%AC%E3%83%93', 'tweet_volume': None}, {'name': 'Brendan', 'url': 'http://twitter.com/search?q=Brendan', 'promoted_content': None, 'query': 'Brendan', 'tweet_volume': 27381}, {'name': '小林清志さん', 'url': 'http://twitter.com/search?q=%E5%B0%8F%E6%9E%97%E6%B8%85%E5%BF%97%E3%81%95%E3%82%93', 'promoted_content': None, 'query': '%E5%B0%8F%E6%9E%97%E6%B8%85%E5%BF%97%E3%81%95%E3%82%93', 'tweet_volume': 40812}, {'name': 'MOONBIN X NEIKIDNIS COLLECTION', 'url': 'http://twitter.com/search?q=%22MOONBIN+X+NEIKIDNIS+COLLECTION%22', 'promoted_content': None, 'query': '%22MOONBIN+X+NEIKIDNIS+COLLECTION%22', 'tweet_volume': 12174}, {'name': 'Sakkari', 'url': 'http://twitter.com/search?q=Sakkari', 'promoted_content': None, 'query': 'Sakkari', 'tweet_volume': None}, {'name': 'Andreescu', 'url': 'http://twitter.com/search?q=Andreescu', 'promoted_content': None, 'query': 'Andreescu', 'tweet_volume': None}, {'name': 'プリンニシテヤルノ', 'url': 'http://twitter.com/search?q=%E3%83%97%E3%83%AA%E3%83%B3%E3%83%8B%E3%82%B7%E3%83%86%E3%83%A4%E3%83%AB%E3%83%8E', 'promoted_content': None, 'query': '%E3%83%97%E3%83%AA%E3%83%B3%E3%83%8B%E3%82%B7%E3%83%86%E3%83%A4%E3%83%AB%E3%83%8E', 'tweet_volume': None}, {'name': '菅首相の退陣巡り発言', 'url': 'http://twitter.com/search?q=%E8%8F%85%E9%A6%96%E7%9B%B8%E3%81%AE%E9%80%80%E9%99%A3%E5%B7%A1%E3%82%8A%E7%99%BA%E8%A8%80', 'promoted_content': None, 'query': '%E8%8F%85%E9%A6%96%E7%9B%B8%E3%81%AE%E9%80%80%E9%99%A3%E5%B7%A1%E3%82%8A%E7%99%BA%E8%A8%80', 'tweet_volume': None}, 
{'name': '麻生財務相', 'url': 'http://twitter.com/search?q=%E9%BA%BB%E7%94%9F%E8%B2%A1%E5%8B%99%E7%9B%B8', 'promoted_content': None, 'query': '%E9%BA%BB%E7%94%9F%E8%B2%A1%E5%8B%99%E7%9B%B8', 'tweet_volume': None}, {'name': '次元大介', 'url': 'http://twitter.com/search?q=%E6%AC%A1%E5%85%83%E5%A4%A7%E4%BB%8B', 'promoted_content': None, 'query': '%E6%AC%A1%E5%85%83%E5%A4%A7%E4%BB%8B', 'tweet_volume': 233909}, {'name': 'Icardi', 'url': 'http://twitter.com/search?q=Icardi', 'promoted_content': None, 'query': 'Icardi', 'tweet_volume': None}, {'name': 'ムンナイ', 'url': 'http://twitter.com/search?q=%E3%83%A0%E3%83%B3%E3%83%8A%E3%82%A4', 'promoted_content': None, 'query': '%E3%83%A0%E3%83%B3%E3%83%8A%E3%82%A4', 'tweet_volume': None}, {'name': 'bozo', 'url': 'http://twitter.com/search?q=bozo', 'promoted_content': None, 'query': 'bozo', 'tweet_volume': 45116}, {'name': 'Pieper', 'url': 'http://twitter.com/search?q=Pieper', 'promoted_content': None, 'query': 'Pieper', 'tweet_volume': 11497}, {'name': 'Xbox One', 'url': 'http://twitter.com/search?q=%22Xbox+One%22', 'promoted_content': None, 'query': '%22Xbox+One%22', 'tweet_volume': 14678}, {'name': 'Requesting Bazinga', 'url': 'http://twitter.com/search?q=%22Requesting+Bazinga%22', 'promoted_content': None, 'query': '%22Requesting+Bazinga%22', 'tweet_volume': 80830}, {'name': '次元の声', 'url': 'http://twitter.com/search?q=%E6%AC%A1%E5%85%83%E3%81%AE%E5%A3%B0', 'promoted_content': None, 'query': '%E6%AC%A1%E5%85%83%E3%81%AE%E5%A3%B0', 'tweet_volume': 21235}, {'name': 'Ole Miss', 'url': 'http://twitter.com/search?q=%22Ole+Miss%22', 'promoted_content': None, 'query': '%22Ole+Miss%22', 'tweet_volume': 21541}, {'name': 'Mac Miller', 'url': 'http://twitter.com/search?q=%22Mac+Miller%22', 'promoted_content': None, 'query': '%22Mac+Miller%22', 'tweet_volume': 19075}, {'name': 'Esplanada', 'url': 'http://twitter.com/search?q=Esplanada', 'promoted_content': None, 'query': 'Esplanada', 'tweet_volume': 81315}, {'name': '1 Day', 'url': 'http://twitter.com/search?q=%221+Day%22', 'promoted_content': None, 'query': '%221+Day%22', 'tweet_volume': 372201}, {'name': '麺オタク', 'url': 'http://twitter.com/search?q=%E9%BA%BA%E3%82%AA%E3%82%BF%E3%82%AF', 'promoted_content': None, 'query': '%E9%BA%BA%E3%82%AA%E3%82%BF%E3%82%AF', 'tweet_volume': None}, {'name': 'チャンスー', 'url': 'http://twitter.com/search?q=%E3%83%81%E3%83%A3%E3%83%B3%E3%82%B9%E3%83%BC', 'promoted_content': None, 'query': '%E3%83%81%E3%83%A3%E3%83%B3%E3%82%B9%E3%83%BC', 'tweet_volume': None}, {'name': 'Allan Rodríguez', 'url': 'http://twitter.com/search?q=%22Allan+Rodr%C3%ADguez%22', 'promoted_content': None, 'query': '%22Allan+Rodr%C3%ADguez%22', 'tweet_volume': None}, {'name': '元AKBラーメン店主', 'url': 'http://twitter.com/search?q=%E5%85%83AKB%E3%83%A9%E3%83%BC%E3%83%A1%E3%83%B3%E5%BA%97%E4%B8%BB', 'promoted_content': None, 'query': '%E5%85%83AKB%E3%83%A9%E3%83%BC%E3%83%A1%E3%83%B3%E5%BA%97%E4%B8%BB', 'tweet_volume': None}], 'as_of': '2021-09-07T06:23:07Z', 'created_at': '2021-09-05T22:27:21Z', 'locations': [{'name': 'Worldwide', 'woeid': 1}]}]\n\n[{'trends': [{'name': '#BachelorInParadise', 'url': 'http://twitter.com/search?q=%23BachelorInParadise', 'promoted_content': None, 'query': '%23BachelorInParadise', 'tweet_volume': 47837}, {'name': 'Brendan', 'url': 'http://twitter.com/search?q=Brendan', 'promoted_content': None, 'query': 'Brendan', 'tweet_volume': 27381}, {'name': 'Xbox 360', 'url': 'http://twitter.com/search?q=%22Xbox+360%22', 'promoted_content': None, 'query': '%22Xbox+360%22', 'tweet_volume': 21327}, {'name': 
'#billiejoin', 'url': 'http://twitter.com/search?q=%23billiejoin', 'promoted_content': None, 'query': '%23billiejoin', 'tweet_volume': None}, {'name': 'Xbox One', 'url': 'http://twitter.com/search?q=%22Xbox+One%22', 'promoted_content': None, 'query': '%22Xbox+One%22', 'tweet_volume': 14667}, {'name': 'Ole Miss', 'url': 'http://twitter.com/search?q=%22Ole+Miss%22', 'promoted_content': None, 'query': '%22Ole+Miss%22', 'tweet_volume': 21541}, {'name': 'Mac Miller', 'url': 'http://twitter.com/search?q=%22Mac+Miller%22', 'promoted_content': None, 'query': '%22Mac+Miller%22', 'tweet_volume': 19075}, {'name': 'Louisville', 'url': 'http://twitter.com/search?q=Louisville', 'promoted_content': None, 'query': 'Louisville', 'tweet_volume': 21484}, {'name': 'Sakkari', 'url': 'http://twitter.com/search?q=Sakkari', 'promoted_content': None, 'query': 'Sakkari', 'tweet_volume': None}, {'name': 'Wii U', 'url': 'http://twitter.com/search?q=%22Wii+U%22', 'promoted_content': None, 'query': '%22Wii+U%22', 'tweet_volume': None}, {'name': 'Natasha', 'url': 'http://twitter.com/search?q=Natasha', 'promoted_content': None, 'query': 'Natasha', 'tweet_volume': 27175}, {'name': '#TaxTheChurches', 'url': 'http://twitter.com/search?q=%23TaxTheChurches', 'promoted_content': None, 'query': '%23TaxTheChurches', 'tweet_volume': None}, {'name': 'Blade', 'url': 'http://twitter.com/search?q=Blade', 'promoted_content': None, 'query': 'Blade', 'tweet_volume': 25220}, {'name': '#WWERaw', 'url': 'http://twitter.com/search?q=%23WWERaw', 'promoted_content': None, 'query': '%23WWERaw', 'tweet_volume': 53446}, {'name': 'Sega Genesis', 'url': 'http://twitter.com/search?q=%22Sega+Genesis%22', 'promoted_content': None, 'query': '%22Sega+Genesis%22', 'tweet_volume': None}, {'name': 'George Washington', 'url': 'http://twitter.com/search?q=%22George+Washington%22', 'promoted_content': None, 'query': '%22George+Washington%22', 'tweet_volume': None}, {'name': 'Andreescu', 'url': 'http://twitter.com/search?q=Andreescu', 'promoted_content': None, 'query': 'Andreescu', 'tweet_volume': None}, {'name': '#Sep7Coup', 'url': 'http://twitter.com/search?q=%23Sep7Coup', 'promoted_content': None, 'query': '%23Sep7Coup', 'tweet_volume': 75743}, {'name': 'karl', 'url': 'http://twitter.com/search?q=karl', 'promoted_content': None, 'query': 'karl', 'tweet_volume': 54959}, {'name': 'Junie B Jones', 'url': 'http://twitter.com/search?q=%22Junie+B+Jones%22', 'promoted_content': None, 'query': '%22Junie+B+Jones%22', 'tweet_volume': None}, {'name': 'Michael K. Williams', 'url': 'http://twitter.com/search?q=%22Michael+K.+Williams%22', 'promoted_content': None, 'query': '%22Michael+K.+Williams%22', 'tweet_volume': 466732}, {'name': 'The Wire', 'url': 'http://twitter.com/search?q=%22The+Wire%22', 'promoted_content': None, 'query': '%22The+Wire%22', 'tweet_volume': 153135}, {'name': 'Robert E. 
Lee', 'url': 'http://twitter.com/search?q=%22Robert+E.+Lee%22', 'promoted_content': None, 'query': '%22Robert+E.+Lee%22', 'tweet_volume': None}, {'name': 'Omar', 'url': 'http://twitter.com/search?q=Omar', 'promoted_content': None, 'query': 'Omar', 'tweet_volume': 187201}, {'name': 'Gameboy Color', 'url': 'http://twitter.com/search?q=%22Gameboy+Color%22', 'promoted_content': None, 'query': '%22Gameboy+Color%22', 'tweet_volume': None}, {'name': 'Atari 2600', 'url': 'http://twitter.com/search?q=%22Atari+2600%22', 'promoted_content': None, 'query': '%22Atari+2600%22', 'tweet_volume': None}, {'name': 'Nintendo Switch', 'url': 'http://twitter.com/search?q=%22Nintendo+Switch%22', 'promoted_content': None, 'query': '%22Nintendo+Switch%22', 'tweet_volume': 23667}, {'name': 'GameCube', 'url': 'http://twitter.com/search?q=GameCube', 'promoted_content': None, 'query': 'GameCube', 'tweet_volume': None}, {'name': 'Scott Baio', 'url': 'http://twitter.com/search?q=%22Scott+Baio%22', 'promoted_content': None, 'query': '%22Scott+Baio%22', 'tweet_volume': None}, {'name': 'Matt Corral', 'url': 'http://twitter.com/search?q=%22Matt+Corral%22', 'promoted_content': None, 'query': '%22Matt+Corral%22', 'tweet_volume': None}, {'name': 'Nintendo DS', 'url': 'http://twitter.com/search?q=%22Nintendo+DS%22', 'promoted_content': None, 'query': '%22Nintendo+DS%22', 'tweet_volume': None}, {'name': 'snoop conner', 'url': 'http://twitter.com/search?q=%22snoop+conner%22', 'promoted_content': None, 'query': '%22snoop+conner%22', 'tweet_volume': None}, {'name': 'Xbox Series X', 'url': 'http://twitter.com/search?q=%22Xbox+Series+X%22', 'promoted_content': None, 'query': '%22Xbox+Series+X%22', 'tweet_volume': 10297}, {'name': 'taemin', 'url': 'http://twitter.com/search?q=taemin', 'promoted_content': None, 'query': 'taemin', 'tweet_volume': 45415}, {'name': 'Super Nintendo', 'url': 'http://twitter.com/search?q=%22Super+Nintendo%22', 'promoted_content': None, 'query': '%22Super+Nintendo%22', 'tweet_volume': None}, {'name': 'Satterfield', 'url': 'http://twitter.com/search?q=Satterfield', 'promoted_content': None, 'query': 'Satterfield', 'tweet_volume': None}, {'name': 'Brooksby', 'url': 'http://twitter.com/search?q=Brooksby', 'promoted_content': None, 'query': 'Brooksby', 'tweet_volume': 16532}, {'name': 'PlayStation 2', 'url': 'http://twitter.com/search?q=%22PlayStation+2%22', 'promoted_content': None, 'query': '%22PlayStation+2%22', 'tweet_volume': None}, {'name': 'Franklin Graham', 'url': 'http://twitter.com/search?q=%22Franklin+Graham%22', 'promoted_content': None, 'query': '%22Franklin+Graham%22', 'tweet_volume': 28402}, {'name': 'DS Lite', 'url': 'http://twitter.com/search?q=%22DS+Lite%22', 'promoted_content': None, 'query': '%22DS+Lite%22', 'tweet_volume': None}, {'name': 'DeJoy', 'url': 'http://twitter.com/search?q=DeJoy', 'promoted_content': None, 'query': 'DeJoy', 'tweet_volume': 34438}, {'name': 'Trey Mancini', 'url': 'http://twitter.com/search?q=%22Trey+Mancini%22', 'promoted_content': None, 'query': '%22Trey+Mancini%22', 'tweet_volume': None}, {'name': 'PS4 Pro', 'url': 'http://twitter.com/search?q=%22PS4+Pro%22', 'promoted_content': None, 'query': '%22PS4+Pro%22', 'tweet_volume': None}, {'name': 'Omos', 'url': 'http://twitter.com/search?q=Omos', 'promoted_content': None, 'query': 'Omos', 'tweet_volume': None}, {'name': 'Djokovic', 'url': 'http://twitter.com/search?q=Djokovic', 'promoted_content': None, 'query': 'Djokovic', 'tweet_volume': 22406}, {'name': 'Nintendo Wii', 'url': 
'http://twitter.com/search?q=%22Nintendo+Wii%22', 'promoted_content': None, 'query': '%22Nintendo+Wii%22', 'tweet_volume': None}, {'name': 'Talia', 'url': 'http://twitter.com/search?q=Talia', 'promoted_content': None, 'query': 'Talia', 'tweet_volume': None}, {'name': 'Hyunjin', 'url': 'http://twitter.com/search?q=Hyunjin', 'promoted_content': None, 'query': 'Hyunjin', 'tweet_volume': 119617}, {'name': 'SNES', 'url': 'http://twitter.com/search?q=SNES', 'promoted_content': None, 'query': 'SNES', 'tweet_volume': None}, {'name': 'mr. davis', 'url': 'http://twitter.com/search?q=%22mr.+davis%22', 'promoted_content': None, 'query': '%22mr.+davis%22', 'tweet_volume': None}], 'as_of': '2021-09-07T06:23:08Z', 'created_at': '2021-09-06T12:22:26Z', 'locations': [{'name': 'United States', 'woeid': 23424977}]}]\n" ], [ "for trend in world_trends[0]['trends']:\n print(trend['name'])", "jade picon\n#velatobkk\nNeymar\n#TENLEE_10MPaintMeNaked\n#ZETAWIN2021\nJoão Guilherme\nTwiteraaGt Açıldı\nTEN OUT OF TEN\n#ウマ娘で性格診断\nポケカの拡張パック\nウマ娘のキャラ\n大塚明夫さん\n大塚さん\n#AnonymousBrof\n古戦場欠席\n野外音楽イベントの客\n酒類提供\n全国最多感染\n小林さん\nangie\n椎名林檎\nBrasília\nルパン三世\nXbox 360\nまがりなり\nCBCテレビ\nBrendan\n小林清志さん\nMOONBIN X NEIKIDNIS COLLECTION\nSakkari\nAndreescu\nプリンニシテヤルノ\n菅首相の退陣巡り発言\n麻生財務相\n次元大介\nIcardi\nムンナイ\nbozo\nPieper\nXbox One\nRequesting Bazinga\n次元の声\nOle Miss\nMac Miller\nEsplanada\n1 Day\n麺オタク\nチャンスー\nAllan Rodríguez\n元AKBラーメン店主\n" ], [ "for trend in us_trends[0]['trends']:\n print(trend['name'])", "#BachelorInParadise\nBrendan\nXbox 360\n#billiejoin\nXbox One\nOle Miss\nMac Miller\nLouisville\nSakkari\nWii U\nNatasha\n#TaxTheChurches\nBlade\n#WWERaw\nSega Genesis\nGeorge Washington\nAndreescu\n#Sep7Coup\nkarl\nJunie B Jones\nMichael K. Williams\nThe Wire\nRobert E. Lee\nOmar\nGameboy Color\nAtari 2600\nNintendo Switch\nGameCube\nScott Baio\nMatt Corral\nNintendo DS\nsnoop conner\nXbox Series X\ntaemin\nSuper Nintendo\nSatterfield\nBrooksby\nPlayStation 2\nFranklin Graham\nDS Lite\nDeJoy\nTrey Mancini\nPS4 Pro\nOmos\nDjokovic\nNintendo Wii\nTalia\nHyunjin\nSNES\nmr. davis\n" ], [ "world_trends_set = set([trend['name'] \n for trend in world_trends[0]['trends']])\n\nus_trends_set = set([trend['name'] \n for trend in us_trends[0]['trends']]) \n\ncommon_trends = world_trends_set.intersection(us_trends_set)\n\nprint(common_trends)", "{'Xbox 360', 'Andreescu', 'Mac Miller', 'Brendan', 'Xbox One', 'Ole Miss', 'Sakkari'}\n" ] ], [ [ "## Anatomy of a Tweet", "_____no_output_____" ] ], [ [ "import json\n\n# Set this variable to a trending topic, \n# or anything else for that matter. 
The example query below\n# was a trending topic when this content was being developed\n# and is used throughout the remainder of this chapter.\n\nq = '#MothersDay' \n\ncount = 100\n\n# Import unquote to prevent url encoding errors in next_results\nfrom urllib.parse import unquote\n\n# See https://dev.twitter.com/rest/reference/get/search/tweets\n\nsearch_results = twitter_api.search.tweets(q=q, count=count)\n\nstatuses = search_results['statuses']\n\n\n# Iterate through 5 more batches of results by following the cursor\nfor _ in range(5):\n print('Length of statuses', len(statuses))\n try:\n next_results = search_results['search_metadata']['next_results']\n except KeyError as e: # No more results when next_results doesn't exist\n break\n \n # Create a dictionary from next_results, which has the following form:\n # ?max_id=847960489447628799&q=%23RIPSelena&count=100&include_entities=1\n kwargs = dict([ kv.split('=') for kv in unquote(next_results[1:]).split(\"&\") ])\n \n search_results = twitter_api.search.tweets(**kwargs)\n statuses += search_results['statuses']\n\n# Show one sample search result by slicing the list...\nprint(json.dumps(statuses[0], indent=1))", "Length of statuses 100\nLength of statuses 200\nLength of statuses 300\nLength of statuses 400\nLength of statuses 500\n{\n \"created_at\": \"Tue Sep 07 06:13:46 +0000 2021\",\n \"id\": 1435124113174007811,\n \"id_str\": \"1435124113174007811\",\n \"text\": \"RT @Forest_Draw: Some game Moms for this #MothersDay \\n\\n#Rosalina #SuperMario #SuperMarioGalaxy #SuperMarioGalaxy2 #MsPacMan #Pacman #Melony\\u2026\",\n \"truncated\": false,\n \"entities\": {\n \"hashtags\": [\n {\n \"text\": \"MothersDay\",\n \"indices\": [\n 41,\n 52\n ]\n },\n {\n \"text\": \"Rosalina\",\n \"indices\": [\n 55,\n 64\n ]\n },\n {\n \"text\": \"SuperMario\",\n \"indices\": [\n 65,\n 76\n ]\n },\n {\n \"text\": \"SuperMarioGalaxy\",\n \"indices\": [\n 77,\n 94\n ]\n },\n {\n \"text\": \"SuperMarioGalaxy2\",\n \"indices\": [\n 95,\n 113\n ]\n },\n {\n \"text\": \"MsPacMan\",\n \"indices\": [\n 114,\n 123\n ]\n },\n {\n \"text\": \"Pacman\",\n \"indices\": [\n 124,\n 131\n ]\n },\n {\n \"text\": \"Melony\",\n \"indices\": [\n 132,\n 139\n ]\n }\n ],\n \"symbols\": [],\n \"user_mentions\": [\n {\n \"screen_name\": \"Forest_Draw\",\n \"name\": \"Uncle Forest (OPEN COMMISSIONS)\",\n \"id\": 1328577335508037632,\n \"id_str\": \"1328577335508037632\",\n \"indices\": [\n 3,\n 15\n ]\n }\n ],\n \"urls\": []\n },\n \"metadata\": {\n \"iso_language_code\": \"en\",\n \"result_type\": \"recent\"\n },\n \"source\": \"<a href=\\\"https://mobile.twitter.com\\\" rel=\\\"nofollow\\\">Twitter Web App</a>\",\n \"in_reply_to_status_id\": null,\n \"in_reply_to_status_id_str\": null,\n \"in_reply_to_user_id\": null,\n \"in_reply_to_user_id_str\": null,\n \"in_reply_to_screen_name\": null,\n \"user\": {\n \"id\": 1288240498365276160,\n \"id_str\": \"1288240498365276160\",\n \"name\": \"mildew votes ndp\",\n \"screen_name\": \"maskedmildew\",\n \"location\": \"canada\",\n \"description\": \"castlevania (game only), samurai, monsters, wrestling and dads. currently into ace attorney too. i draw, check media tab if you'd like. 
i'm 20+ HE/THEY\",\n \"url\": \"https://t.co/ehnwdhuEf9\",\n \"entities\": {\n \"url\": {\n \"urls\": [\n {\n \"url\": \"https://t.co/ehnwdhuEf9\",\n \"expanded_url\": \"https://ko-fi.com/mild3w\",\n \"display_url\": \"ko-fi.com/mild3w\",\n \"indices\": [\n 0,\n 23\n ]\n }\n ]\n },\n \"description\": {\n \"urls\": []\n }\n },\n \"protected\": false,\n \"followers_count\": 113,\n \"friends_count\": 63,\n \"listed_count\": 0,\n \"created_at\": \"Tue Jul 28 22:30:56 +0000 2020\",\n \"favourites_count\": 9261,\n \"utc_offset\": null,\n \"time_zone\": null,\n \"geo_enabled\": false,\n \"verified\": false,\n \"statuses_count\": 14164,\n \"lang\": null,\n \"contributors_enabled\": false,\n \"is_translator\": false,\n \"is_translation_enabled\": false,\n \"profile_background_color\": \"F5F8FA\",\n \"profile_background_image_url\": null,\n \"profile_background_image_url_https\": null,\n \"profile_background_tile\": false,\n \"profile_image_url\": \"http://pbs.twimg.com/profile_images/1428133151533015045/9FTmQTDu_normal.jpg\",\n \"profile_image_url_https\": \"https://pbs.twimg.com/profile_images/1428133151533015045/9FTmQTDu_normal.jpg\",\n \"profile_banner_url\": \"https://pbs.twimg.com/profile_banners/1288240498365276160/1622608499\",\n \"profile_link_color\": \"1DA1F2\",\n \"profile_sidebar_border_color\": \"C0DEED\",\n \"profile_sidebar_fill_color\": \"DDEEF6\",\n \"profile_text_color\": \"333333\",\n \"profile_use_background_image\": true,\n \"has_extended_profile\": true,\n \"default_profile\": true,\n \"default_profile_image\": false,\n \"following\": false,\n \"follow_request_sent\": false,\n \"notifications\": false,\n \"translator_type\": \"none\",\n \"withheld_in_countries\": []\n },\n \"geo\": null,\n \"coordinates\": null,\n \"place\": null,\n \"contributors\": null,\n \"retweeted_status\": {\n \"created_at\": \"Mon May 10 00:56:30 +0000 2021\",\n \"id\": 1391557725390360581,\n \"id_str\": \"1391557725390360581\",\n \"text\": \"Some game Moms for this #MothersDay \\n\\n#Rosalina #SuperMario #SuperMarioGalaxy #SuperMarioGalaxy2 #MsPacMan #Pacman\\u2026 https://t.co/RDjPN3mTds\",\n \"truncated\": true,\n \"entities\": {\n \"hashtags\": [\n {\n \"text\": \"MothersDay\",\n \"indices\": [\n 24,\n 35\n ]\n },\n {\n \"text\": \"Rosalina\",\n \"indices\": [\n 38,\n 47\n ]\n },\n {\n \"text\": \"SuperMario\",\n \"indices\": [\n 48,\n 59\n ]\n },\n {\n \"text\": \"SuperMarioGalaxy\",\n \"indices\": [\n 60,\n 77\n ]\n },\n {\n \"text\": \"SuperMarioGalaxy2\",\n \"indices\": [\n 78,\n 96\n ]\n },\n {\n \"text\": \"MsPacMan\",\n \"indices\": [\n 97,\n 106\n ]\n },\n {\n \"text\": \"Pacman\",\n \"indices\": [\n 107,\n 114\n ]\n }\n ],\n \"symbols\": [],\n \"user_mentions\": [],\n \"urls\": [\n {\n \"url\": \"https://t.co/RDjPN3mTds\",\n \"expanded_url\": \"https://twitter.com/i/web/status/1391557725390360581\",\n \"display_url\": \"twitter.com/i/web/status/1\\u2026\",\n \"indices\": [\n 116,\n 139\n ]\n }\n ]\n },\n \"metadata\": {\n \"iso_language_code\": \"en\",\n \"result_type\": \"recent\"\n },\n \"source\": \"<a href=\\\"https://mobile.twitter.com\\\" rel=\\\"nofollow\\\">Twitter Web App</a>\",\n \"in_reply_to_status_id\": null,\n \"in_reply_to_status_id_str\": null,\n \"in_reply_to_user_id\": null,\n \"in_reply_to_user_id_str\": null,\n \"in_reply_to_screen_name\": null,\n \"user\": {\n \"id\": 1328577335508037632,\n \"id_str\": \"1328577335508037632\",\n \"name\": \"Uncle Forest (OPEN COMMISSIONS)\",\n \"screen_name\": \"Forest_Draw\",\n \"location\": \"\",\n \"description\": \"Art Account || 
Pinchi Mexican Beaner || Wacky Cartoonist || Graphic Design Student || KIRBY FAN || Furry on Fridays :3 || ENG/ESP ||\\n\\nAnything can be Fun! \\ud83d\\ude0b\",\n \"url\": null,\n \"entities\": {\n \"description\": {\n \"urls\": []\n }\n },\n \"protected\": false,\n \"followers_count\": 1284,\n \"friends_count\": 422,\n \"listed_count\": 4,\n \"created_at\": \"Tue Nov 17 05:55:04 +0000 2020\",\n \"favourites_count\": 1672,\n \"utc_offset\": null,\n \"time_zone\": null,\n \"geo_enabled\": false,\n \"verified\": false,\n \"statuses_count\": 604,\n \"lang\": null,\n \"contributors_enabled\": false,\n \"is_translator\": false,\n \"is_translation_enabled\": false,\n \"profile_background_color\": \"F5F8FA\",\n \"profile_background_image_url\": null,\n \"profile_background_image_url_https\": null,\n \"profile_background_tile\": false,\n \"profile_image_url\": \"http://pbs.twimg.com/profile_images/1328580634000945152/rvjRwxR9_normal.jpg\",\n \"profile_image_url_https\": \"https://pbs.twimg.com/profile_images/1328580634000945152/rvjRwxR9_normal.jpg\",\n \"profile_banner_url\": \"https://pbs.twimg.com/profile_banners/1328577335508037632/1626047845\",\n \"profile_link_color\": \"1DA1F2\",\n \"profile_sidebar_border_color\": \"C0DEED\",\n \"profile_sidebar_fill_color\": \"DDEEF6\",\n \"profile_text_color\": \"333333\",\n \"profile_use_background_image\": true,\n \"has_extended_profile\": true,\n \"default_profile\": true,\n \"default_profile_image\": false,\n \"following\": false,\n \"follow_request_sent\": false,\n \"notifications\": false,\n \"translator_type\": \"none\",\n \"withheld_in_countries\": []\n },\n \"geo\": null,\n \"coordinates\": null,\n \"place\": null,\n \"contributors\": null,\n \"is_quote_status\": false,\n \"retweet_count\": 56,\n \"favorite_count\": 259,\n \"favorited\": false,\n \"retweeted\": false,\n \"possibly_sensitive\": false,\n \"lang\": \"en\"\n },\n \"is_quote_status\": false,\n \"retweet_count\": 56,\n \"favorite_count\": 0,\n \"favorited\": false,\n \"retweeted\": false,\n \"lang\": \"en\"\n}\n" ], [ "for i in range(10):\n print()\n print(statuses[i]['text'])\n print('Favorites: ', statuses[i]['favorite_count'])\n print('Retweets: ', statuses[i]['retweet_count'])", "\nRT @Forest_Draw: Some game Moms for this #MothersDay \n\n#Rosalina #SuperMario #SuperMarioGalaxy #SuperMarioGalaxy2 #MsPacMan #Pacman #Melony…\nFavorites: 0\nRetweets: 56\n\nRT @I_Love_Shilpa: #मातृदिन 🙏\nWe celebrate #MothersDay on Shravan Amavas. 
This year it's on Monday 6th Sept.\nSome people think that it's ju…\nFavorites: 0\nRetweets: 12\n\nRT @PRMeena_IAS: #मां का कोई दिन नहीं होता बल्कि #मां से ही दिन होता है। #मातृदिवस की हार्दिक शुभकामनाएं💐💐\n#happymothersday2021 #MothersDay…\nFavorites: 0\nRetweets: 23\n\n#engineering #engineer #technology #didyouknow #facts #calls #mothersday #mothers #science #engineers… https://t.co/ClF0tP3hjo\nFavorites: 0\nRetweets: 0\n\nmothers day gifts from bump | good cheap gifts for mothers day #MothersDay #MothersDay2021 #mugs #mugs… https://t.co/m8FU8STyia\nFavorites: 0\nRetweets: 0\n\nVintage Kitsch Kissing Fish Shakers https://t.co/JcMHN1Kv4W #white #housewarming #mothersday #brown #midcentury… https://t.co/KhZ6YUdFh3\nFavorites: 0\nRetweets: 0\n\nRT @ArorajArvind: Belive in yourself 👍#arvindaroraquotes #arvindarora #amazingquotes #motherhood #mothersday #dailyquotes #dailywatch #dail…\nFavorites: 0\nRetweets: 4\n\nRT @JunkYardBlonde: Vintage Chinoiserie Tea set in Silk Brocade Storage Box 8 Pc Set https://t.co/NHvjItoYv5 #rainbow #housewarming #mother…\nFavorites: 0\nRetweets: 4\n\nRT @thriftyfifty6: Vintage Chunky Nautical Necklace in Gold Tone, 18 Inch Chain W by TreasuringFifty https://t.co/epPSh7lFhp via @Etsy #Nau…\nFavorites: 0\nRetweets: 30\n\nmother's day blanket from daughter | wedding gift ideas from mother to daughter #MothersDay #MothersDay2021… https://t.co/GK58V3qQZI\nFavorites: 0\nRetweets: 0\n" ] ], [ [ "## Extracting text, screen names, and hashtags from tweets", "_____no_output_____" ] ], [ [ "status_texts = [ status['text'] \n for status in statuses ]\n\nscreen_names = [ user_mention['screen_name'] \n for status in statuses\n for user_mention in status['entities']['user_mentions'] ]\n\nhashtags = [ hashtag['text'] \n for status in statuses\n for hashtag in status['entities']['hashtags'] ]\n\n# Compute a collection of all words from all tweets\nwords = [ w \n for t in status_texts \n for w in t.split() ]\n\n# Explore the first 5 items for each...\n\nprint(json.dumps(status_texts[0:5], indent=1))\nprint(json.dumps(screen_names[0:5], indent=1) )\nprint(json.dumps(hashtags[0:5], indent=1))\nprint(json.dumps(words[0:5], indent=1))", "[\n \"RT @Forest_Draw: Some game Moms for this #MothersDay \\n\\n#Rosalina #SuperMario #SuperMarioGalaxy #SuperMarioGalaxy2 #MsPacMan #Pacman #Melony\\u2026\",\n \"RT @I_Love_Shilpa: #\\u092e\\u093e\\u0924\\u0943\\u0926\\u093f\\u0928 \\ud83d\\ude4f\\nWe celebrate #MothersDay on Shravan Amavas. 
This year it's on Monday 6th Sept.\\nSome people think that it's ju\\u2026\",\n \"RT @PRMeena_IAS: #\\u092e\\u093e\\u0902 \\u0915\\u093e \\u0915\\u094b\\u0908 \\u0926\\u093f\\u0928 \\u0928\\u0939\\u0940\\u0902 \\u0939\\u094b\\u0924\\u093e \\u092c\\u0932\\u094d\\u0915\\u093f #\\u092e\\u093e\\u0902 \\u0938\\u0947 \\u0939\\u0940 \\u0926\\u093f\\u0928 \\u0939\\u094b\\u0924\\u093e \\u0939\\u0948\\u0964 #\\u092e\\u093e\\u0924\\u0943\\u0926\\u093f\\u0935\\u0938 \\u0915\\u0940 \\u0939\\u093e\\u0930\\u094d\\u0926\\u093f\\u0915 \\u0936\\u0941\\u092d\\u0915\\u093e\\u092e\\u0928\\u093e\\u090f\\u0902\\ud83d\\udc90\\ud83d\\udc90\\n#happymothersday2021 #MothersDay\\u2026\",\n \"#engineering #engineer #technology #didyouknow #facts #calls #mothersday #mothers #science #engineers\\u2026 https://t.co/ClF0tP3hjo\",\n \"mothers day gifts from bump | good cheap gifts for mothers day #MothersDay #MothersDay2021 #mugs #mugs\\u2026 https://t.co/m8FU8STyia\"\n]\n[\n \"Forest_Draw\",\n \"I_Love_Shilpa\",\n \"PRMeena_IAS\",\n \"ArorajArvind\",\n \"JunkYardBlonde\"\n]\n[\n \"MothersDay\",\n \"Rosalina\",\n \"SuperMario\",\n \"SuperMarioGalaxy\",\n \"SuperMarioGalaxy2\"\n]\n[\n \"RT\",\n \"@Forest_Draw:\",\n \"Some\",\n \"game\",\n \"Moms\"\n]\n" ] ], [ [ "## Creating a basic frequency distribution from the words in tweets", "_____no_output_____" ] ], [ [ "from collections import Counter\n\nfor item in [words, screen_names, hashtags]:\n c = Counter(item)\n print(c.most_common()[:10]) # top 10\n print()", "[('RT', 213), ('the', 195), ('to', 182), ('.', 147), ('#MothersDay', 122), ('day', 117), ('for', 116), ('#teachersday', 101), ('#mothersday', 98), ('Happy', 96)]\n\n[('ArorajArvind', 12), ('men_are_human', 12), ('I_Love_Shilpa', 11), ('TrasElValle_OBC', 10), ('jjaranaz94', 8), ('thriftyfifty6', 7), ('Etsy', 7), ('renaissancefai', 6), ('creatorzRT', 6), ('DAndalora_Bella', 5)]\n\n[('MothersDay', 130), ('mothersday', 112), ('teachersday', 111), ('teacher', 99), ('teachers', 84), ('happyteachersday', 65), ('mugs', 58), ('MothersDay2021', 57), ('teachersofinstagram', 53), ('vintage', 46)]\n\n" ] ], [ [ "## Using prettytable to display tuples in a nice tabular format", "_____no_output_____" ] ], [ [ "from prettytable import PrettyTable\n\nfor label, data in (('Word', words), \n ('Screen Name', screen_names), \n ('Hashtag', hashtags)):\n pt = PrettyTable(field_names=[label, 'Count']) \n c = Counter(data)\n [ pt.add_row(kv) for kv in c.most_common()[:10] ]\n pt.align[label], pt.align['Count'] = 'l', 'r' # Set column alignment\n print(pt)", "+--------------+-------+\n| Word | Count |\n+--------------+-------+\n| RT | 213 |\n| the | 195 |\n| to | 182 |\n| . 
| 147 |\n| #MothersDay | 122 |\n| day | 117 |\n| for | 116 |\n| #teachersday | 101 |\n| #mothersday | 98 |\n| Happy | 96 |\n+--------------+-------+\n+-----------------+-------+\n| Screen Name | Count |\n+-----------------+-------+\n| ArorajArvind | 12 |\n| men_are_human | 12 |\n| I_Love_Shilpa | 11 |\n| TrasElValle_OBC | 10 |\n| jjaranaz94 | 8 |\n| thriftyfifty6 | 7 |\n| Etsy | 7 |\n| renaissancefai | 6 |\n| creatorzRT | 6 |\n| DAndalora_Bella | 5 |\n+-----------------+-------+\n+---------------------+-------+\n| Hashtag | Count |\n+---------------------+-------+\n| MothersDay | 130 |\n| mothersday | 112 |\n| teachersday | 111 |\n| teacher | 99 |\n| teachers | 84 |\n| happyteachersday | 65 |\n| mugs | 58 |\n| MothersDay2021 | 57 |\n| teachersofinstagram | 53 |\n| vintage | 46 |\n+---------------------+-------+\n" ] ], [ [ "## Calculating lexical diversity for tweets", "_____no_output_____" ] ], [ [ "# A function for computing lexical diversity\ndef lexical_diversity(tokens):\n return len(set(tokens))/len(tokens) \n\n# A function for computing the average number of words per tweet\ndef average_words(statuses):\n total_words = sum([ len(s.split()) for s in statuses ]) \n return total_words/len(statuses)\n\nprint(lexical_diversity(words))\nprint(lexical_diversity(screen_names))\nprint(lexical_diversity(hashtags))\nprint(average_words(status_texts))", "0.3299768401973618\n0.54\n0.21353166986564298\n16.551666666666666\n" ] ], [ [ "## Finding the most popular retweets", "_____no_output_____" ] ], [ [ "retweets = [\n # Store out a tuple of these three values ...\n (status['retweet_count'], \n status['retweeted_status']['user']['screen_name'],\n status['retweeted_status']['id'],\n status['text']) \n \n # ... for each status ...\n for status in statuses \n \n # ... so long as the status meets this condition.\n if 'retweeted_status' in status.keys()\n ]\n\n# Slice off the first 5 from the sorted results and display each item in the tuple\n\npt = PrettyTable(field_names=['Count', 'Screen Name', 'Tweet ID', 'Text'])\n[ pt.add_row(row) for row in sorted(retweets, reverse=True)[:5] ]\npt.max_width['Text'] = 50\npt.align= 'l'\nprint(pt)", "+-------+----------------+---------------------+----------------------------------------------------+\n| Count | Screen Name | Tweet ID | Text |\n+-------+----------------+---------------------+----------------------------------------------------+\n| 13655 | blkgirlculture | 1259486190505426952 | RT @blkgirlculture: These Black mamas really said |\n| | | | Copy and paste |\n| | | | |\n| | | | Happy #MothersDay 💫 https://t.co/fDgczno9hk |\n| 2739 | ohmthitiwat | 1293548053811441664 | RT @ohmthitiwat: สุขสันต์วันแม่นะครับ |\n| | | | ขอให้แม่มีสุขภาพแข็งแรง มีความสุขมากๆ |\n| | | | โอห์มจะเป็นเด็กดี รักแม่ทุกวันนะครับ💙 #MothersDay |\n| | | | https://t.co/… |\n| 2671 | insan_honey | 1391319125163577345 | RT @insan_honey: माँ! |\n| | | | तू है तो हिम्मत है, |\n| | | | तू है तो जन्नत है, |\n| | | | तू है तो रहमत है, |\n| | | | हमारे सारे आँसू पी लेती है, होठों को हँसी देती है, |\n| | | | हर लम्हे… |\n| 2671 | insan_honey | 1391319125163577345 | RT @insan_honey: माँ! |\n| | | | तू है तो हिम्मत है, |\n| | | | तू है तो जन्नत है, |\n| | | | तू है तो रहमत है, |\n| | | | हमारे सारे आँसू पी लेती है, होठों को हँसी देती है, |\n| | | | हर लम्हे… |\n| 1998 | gaysony24 | 1259631832070569986 | RT @gaysony24: Momma Bakugou is not impressed with |\n| | | | Katsuki’s growth spurt. 
#MothersDay |\n| | | | https://t.co/ClKaZ1cU7I |\n+-------+----------------+---------------------+----------------------------------------------------+\n" ] ], [ [ "## Looking up users who have retweeted a status", "_____no_output_____" ] ], [ [ "# Get the original tweet id for a tweet from its retweeted_status node \n# and insert it here\n\n_retweets = twitter_api.statuses.retweets(id=862359093398261760)\nprint([r['user']['screen_name'] for r in _retweets])", "['vijoumolce1988', 'driftourare1988', 'NoNicheNC', 'livingmybestie', 'Rockawaytheday']\n" ] ], [ [ "## Plotting frequencies of words", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n%matplotlib inline\nword_counts = sorted(Counter(words).values(), reverse=True)\n\nplt.loglog(word_counts)\nplt.ylabel(\"Freq\")\nplt.xlabel(\"Word Rank\")", "_____no_output_____" ] ], [ [ "## Generating histograms of words, screen names, and hashtags", "_____no_output_____" ] ], [ [ "for label, data in (('Words', words), \n ('Screen Names', screen_names), \n ('Hashtags', hashtags)):\n\n # Build a frequency map for each set of data\n # and plot the values\n c = Counter(data)\n plt.hist(list(c.values()))\n \n # Add a title and y-label ...\n plt.title(label)\n plt.ylabel(\"Number of items in bin\")\n plt.xlabel(\"Bins (number of times an item appeared)\")\n \n # ... and display as a new figure\n plt.figure()", "_____no_output_____" ] ], [ [ "## Generating a histogram of retweet counts", "_____no_output_____" ] ], [ [ "# Using underscores while unpacking values in\n# a tuple is idiomatic for discarding them\n\ncounts = [count for count, _, _, _ in retweets]\n\nplt.hist(counts)\nplt.title('Retweets')\nplt.xlabel('Bins (number of times retweeted)')\nplt.ylabel('Number of tweets in bin')", "_____no_output_____" ] ], [ [ "## Sentiment Analysis", "_____no_output_____" ] ], [ [ "# pip install nltk\nimport nltk\nnltk.download('vader_lexicon')\n\nimport numpy as np\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer", "[nltk_data] Downloading package vader_lexicon to\n[nltk_data] /home/shaneoh/nltk_data...\n[nltk_data] Package vader_lexicon is already up-to-date!\n" ], [ "twitter_stream = twitter.TwitterStream(auth=auth)\niterator = twitter_stream.statuses.sample()", "_____no_output_____" ], [ "tweets = []\nfor tweet in iterator:\n try:\n if tweet['lang'] == 'en':\n tweets.append(tweet)\n except:\n pass\n if len(tweets) == 100:\n break", "_____no_output_____" ], [ "analyzer = SentimentIntensityAnalyzer()", "_____no_output_____" ], [ "analyzer.polarity_scores('Hello')", "_____no_output_____" ], [ "analyzer.polarity_scores('I really enjoy this video series.')", "_____no_output_____" ], [ "analyzer.polarity_scores('I REALLY enjoy this video series.')", "_____no_output_____" ], [ "analyzer.polarity_scores('I REALLY enjoy this video series!!!')", "_____no_output_____" ], [ "analyzer.polarity_scores('I REALLY did not enjoy this video series!!!')", "_____no_output_____" ], [ "scores = np.zeros(len(tweets))\n\nfor i, t in enumerate(tweets):\n # Extract the text portion of the tweet\n text = t['text']\n \n # Measure the polarity of the tweet\n polarity = analyzer.polarity_scores(text)\n \n # Store the normalized, weighted composite score\n scores[i] = polarity['compound']", "_____no_output_____" ], [ "most_positive = np.argmax(scores)\nmost_negative = np.argmin(scores)", "_____no_output_____" ], [ "print('{0:6.3f} : \"{1}\"'.format(scores[most_positive], tweets[most_positive]['text']))", " 0.848 : \"RT @Kavin_Karthik97: Happy Birthday @mammukka 
sir ❤️ have a blockbuster year ahead 😊 behalf wishes from @Kavin_m_0431 fans 🤗 \n\n#HappyBirthd…\"\n" ], [ "print('{0:6.3f} : \"{1}\"'.format(scores[most_negative], tweets[most_negative]['text']))", "-0.773 : \"RT @earringdealer1: The downfall of Nasty Gal is still so crazy to me bc to this day nobody has been able to provide what Sophia Amoruso br…\"\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
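One thing the cursored search in that notebook glosses over is Twitter's rate limiting. The sketch below wraps the same pagination loop with a simple wait-and-retry; it assumes the authenticated `twitter_api` object from the notebook, and the `TwitterHTTPError`/`e.e.code` error attributes are my reading of the `twitter` package's conventions, so treat the exception handling as an assumption to verify against the package you have installed.

```python
import time
from urllib.parse import unquote

import twitter

def search_with_backoff(twitter_api, max_batches=5, **kwargs):
    """Paginate twitter_api.search.tweets like the notebook does, but
    sleep and retry when Twitter responds with HTTP 429 (rate limited)."""
    statuses = []
    batches = 0
    while batches < max_batches:
        try:
            results = twitter_api.search.tweets(**kwargs)
        except twitter.TwitterHTTPError as e:
            # e.e is assumed to carry the underlying HTTP error code
            if getattr(e, 'e', None) is not None and e.e.code == 429:
                time.sleep(60)  # wait out the rate-limit window, then retry
                continue
            raise
        statuses += results['statuses']
        batches += 1
        next_results = results['search_metadata'].get('next_results')
        if next_results is None:
            break
        # next_results looks like '?max_id=...&q=...&count=100&include_entities=1'
        kwargs = dict(kv.split('=') for kv in unquote(next_results[1:]).split('&'))
    return statuses
```

Usage mirrors the notebook's cell: `statuses = search_with_backoff(twitter_api, q='#MothersDay', count=100)`.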
4a7cd72a94fa01bdba322d88affd15bcfacbdb31
31,908
ipynb
Jupyter Notebook
examples/naive_bayes_example.ipynb
sourcery-ai-bot/SeaLion
1deae028e591338dd7e74788da63486576515192
[ "MIT" ]
1
2021-03-02T21:01:38.000Z
2021-03-02T21:01:38.000Z
examples/naive_bayes_example.ipynb
sourcery-ai-bot/SeaLion
1deae028e591338dd7e74788da63486576515192
[ "MIT" ]
null
null
null
examples/naive_bayes_example.ipynb
sourcery-ai-bot/SeaLion
1deae028e591338dd7e74788da63486576515192
[ "MIT" ]
1
2021-03-02T21:01:39.000Z
2021-03-02T21:01:39.000Z
59.752809
14,528
0.693995
[ [ [ "\"\"\"\nToday we will be looking at the 2 Naive Bayes classification algorithms SeaLion has to offer - gaussian and multinomial (more common).\nBoth of them use the same underlying principles and as usual we'll explain them step by step. \n\"\"\"\n\n# first import\nimport sealion as sl \nfrom sealion.naive_bayes import GaussianNaiveBayes, MultinomialNaiveBayes", "_____no_output_____" ], [ "\"\"\"\nWe'll first start with gaussian naive bayes. The way it works is by creating a normal (gaussian) curve to measure the\nprobability of any certain feature occuring for a given class. It looks at the probability for a feature to be on \neach class possible. The way it makes its predictions on a given data point is by just looking at the probability of\neach feature in the point for each class, and as it after aggregating all of the probabilities for all of the features\nwill predict the class with the highest probability. \n\"\"\"\n\n# we will use the iris dataset for this\nfrom sklearn.datasets import load_iris\nX, y = load_iris()['data'], load_iris()['target']\n\n# and let's split\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.4, random_state = 3) # another thing to note : \n# with naive bayes, try to always have as balanced data for all classes as possible. ", "_____no_output_____" ], [ "# we can now setup the model\ngnb = GaussianNaiveBayes() \ngnb.fit(X_train, y_train) # fit the model", "_____no_output_____" ], [ "gnb.evaluate(X_test, y_test) # we can evaluate it", "_____no_output_____" ], [ "# WOAH! Looks like we do pretty well with this model. Let's see how much we got wrong.\ny_pred = gnb.predict(X_test)\ny_pred == y_test ", "_____no_output_____" ], [ "# 1 wrong. Super simple, right? \n\n# onto multinomial naive bayes\n\n\"\"\"\nMultinomial Naive Bayes is a type of naive bayes that will work with stuff like text classification, where you have \na dataset where each observation/data point is just a word. This could look like : [\"hello\", \"what\", \"do\", \"you\", \"want\", \"from\", \"me\"]\nfor a given data point. Each feature is the exact same here, so what if a model could look split all data into its classes, \nand then see the probability of finding a feature (i.e. \"hello\") for that class. For example if you have a dataset of 100 emails,\n50 spam and 50 ham - you can split the 100 into a dataset of 50 spam and 50 ham and then count the number of \ntimes \"hello\" and all other features show up in each of those 50 class-datasets (doesn't matter where.) Then if you are given a new\ndata point you can see the probability of seeing each of its features for each class, and choose the class with the\nhighest probability. This is the underlying idea behind multinomial naive bayes. \n\"\"\"\n\n# let's get started\n# the spam dataset is available here : https://www.kaggle.com/uciml/sms-spam-collection-dataset\nimport pandas as pd\nspam_df = pd.read_csv(\"spam.csv\", engine = \"python\", encoding='ISO-8859-1') # we need to manually define the encoding\nspam_df # print it out", "_____no_output_____" ], [ "# as usual data manipulation is honestly not as fun as the algorithms, so we're going to have to get our hands dirty\nX, y = spam_df['v2'], spam_df['v1']\nX, y # let's print this stuff out", "_____no_output_____" ], [ "# it looks like we have plenty of data \n# the first step is tokenize, where we take those strings in each data point and turn them into unique numbers. 
This\n# will apply throughout, so \"hello\" mapped to 100 in one data point is the same for another\n\nVOCAB_SIZE = 10000 # we allow 10000 words\nfrom tensorflow.keras.preprocessing.text import Tokenizer\ntokenizer = Tokenizer(num_words = VOCAB_SIZE)\ntokenizer.fit_on_texts(X)\nX_seq = tokenizer.texts_to_sequences(X)\n\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\n# we'll also want to pad it, meaning that we make sure everything is the same length\nX_pad = pad_sequences(X_seq, maxlen = 100, truncating = \"post\", padding = \"post\")", "_____no_output_____" ], [ "# and we will want to split it up now\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\ny = np.array(y)\ny[np.where(y == \"ham\")] = 0 \ny[np.where(y == \"spam\")] = 1 # spam is 1\n\nX_train, X_test, y_train, y_test = train_test_split(X_pad, y, test_size = 0.15, random_state = 3)", "_____no_output_____" ], [ "# let's print out X_train\nX_train", "_____no_output_____" ], [ "# time to start using Multinomial Naive Bayes\nmnb = MultinomialNaiveBayes()\nmnb.fit(X_train, y_train)", "_____no_output_____" ], [ "# time to evaluate\nmnb.evaluate(X_test, y_test)", "_____no_output_____" ], [ "# dang ... but hmmm is it just predicting 0s? Is that why? \nmnb.predict(X_test)[:10]", "_____no_output_____" ], [ "# looks like it did phenomenally. And of course, we're going to use a confusion matrix. \nfrom sealion.utils import confusion_matrix\nconfusion_matrix(mnb.predict(X_test), y_test)", "_____no_output_____" ], [ "# The only thing we get wrong is thinking something is fine when it's not. I think that's better than \n# the opposite, where you miss something important and it goes into your spam folder...\n\n# Looks like that's the end for us. As usual, I hope you enjoyed this tutorial!", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a7cd9e5f447e0edc0ced7c48c4900a9a621f98e
84,819
ipynb
Jupyter Notebook
DSPT7_LESSON_Unit_2_Sprint_1_Module_1.ipynb
Sid-Oya/DS-Unit-2-Linear-Models
37d999dc203a76890a7d261d500227900c4dce2e
[ "MIT" ]
null
null
null
DSPT7_LESSON_Unit_2_Sprint_1_Module_1.ipynb
Sid-Oya/DS-Unit-2-Linear-Models
37d999dc203a76890a7d261d500227900c4dce2e
[ "MIT" ]
null
null
null
DSPT7_LESSON_Unit_2_Sprint_1_Module_1.ipynb
Sid-Oya/DS-Unit-2-Linear-Models
37d999dc203a76890a7d261d500227900c4dce2e
[ "MIT" ]
null
null
null
52.260628
8,644
0.538959
[ [ [ "<a href=\"https://colab.research.google.com/github/Sid-Oya/DS-Unit-2-Linear-Models/blob/master/DSPT7_LESSON_Unit_2_Sprint_1_Module_1.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "Lambda School Data Science\n\n*Unit 2, Sprint 1, Module 1*\n\n---", "_____no_output_____" ], [ "# Regression 1\n\n- Begin with baselines for regression\n- Use scikit-learn to fit a linear regression\n- Explain the coefficients from a linear regression", "_____no_output_____" ], [ "Brandon Rohrer wrote a good blog post, [“What questions can machine learning answer?”](https://brohrer.github.io/five_questions_data_science_answers.html)\n\nWe’ll focus on two of these questions in Unit 2. These are both types of “supervised learning.”\n\n- “How Much / How Many?” (Regression)\n- “Is this A or B?” (Classification)\n\nThis unit, you’ll build supervised learning models with “tabular data” (data in tables, like spreadsheets). Including, but not limited to:\n\n- Predict New York City real estate prices <-- **Today, we'll start this!**\n- Predict which water pumps in Tanzania need repairs\n- Choose your own labeled, tabular dataset, train a predictive model, and publish a blog post or web app with visualizations to explain your model!", "_____no_output_____" ], [ "### Setup\n\nRun the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.\n\nLibraries:\n\n- ipywidgets\n- pandas\n- plotly\n- scikit-learn\n\nIf your **Plotly** visualizations aren't working:\n- You must have JavaScript enabled in your browser\n- You probably want to use Chrome or Firefox\n- You may need to turn off ad blockers\n- [If you're using Jupyter Lab locally, you need to install some \"extensions\"](https://plot.ly/python/getting-started/#jupyterlab-support-python-35)", "_____no_output_____" ] ], [ [ "import sys\n\n# If you're on Colab:\nif 'google.colab' in sys.modules:\n DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'\n\n# If you're working locally:\nelse:\n DATA_PATH = '../data/'\n \n# Ignore this Numpy warning when using Plotly Express:\n# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.\nimport warnings\nwarnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')", "_____no_output_____" ] ], [ [ "# Begin with baselines for regression", "_____no_output_____" ], [ "## Overview", "_____no_output_____" ], [ "### Predict how much a NYC condo costs 🏠💸\n\nRegression models output continuous numbers, so we can use regression to answer questions like \"How much?\" or \"How many?\" \n\nOften, the question is \"How much will this cost? How many dollars?\"", "_____no_output_____" ], [ "For example, here's a fun YouTube video, which we'll use as our scenario for this lesson:\n\n[Amateurs & Experts Guess How Much a NYC Condo With a Private Terrace Costs](https://www.youtube.com/watch?v=JQCctBOgH9I)\n\n> Real Estate Agent Leonard Steinberg just sold a pre-war condo in New York City's Tribeca neighborhood. We challenged three people - an apartment renter, an apartment owner and a real estate expert - to try to guess how much the apartment sold for. 
Leonard reveals more and more details to them as they refine their guesses.", "_____no_output_____" ], [ "The condo from the video is **1,497 square feet**, built in 1852, and is in a desirable neighborhood. According to the real estate agent, _\"Tribeca is known to be one of the most expensive ZIP codes in all of the United States of America.\"_\n\nHow can we guess what this condo sold for? Let's look at 3 methods:\n\n1. Heuristics\n2. Descriptive Statistics\n3. Predictive Model ", "_____no_output_____" ], [ "## Follow Along", "_____no_output_____" ], [ "### 1. Heuristics\n\nHeuristics are \"rules of thumb\" that people use to make decisions and judgments. The video participants discussed their heuristics:\n\n\n", "_____no_output_____" ], [ "**Participant 1**, Chinwe, is a real estate amateur. She rents her apartment in New York City. Her first guess was 8 million, and her final guess was 15 million.\n\n[She said](https://youtu.be/JQCctBOgH9I?t=465), _\"People just go crazy for numbers like 1852. You say **'pre-war'** to anyone in New York City, they will literally sell a kidney. They will just give you their children.\"_ ", "_____no_output_____" ], [ "**Participant 3**, Pam, is an expert. She runs a real estate blog. Her first guess was 1.55 million, and her final guess was 2.2 million.\n\n[She explained](https://youtu.be/JQCctBOgH9I?t=280) her first guess: _\"I went with a number that I think is kind of the going rate in the location, and that's **a thousand bucks a square foot.**\"_", "_____no_output_____" ], [ "**Participant 2**, Mubeen, is between the others in his expertise level. He owns his apartment in New York City. His first guess was 1.7 million, and his final guess was also 2.2 million.", "_____no_output_____" ], [ "### 2. Descriptive Statistics", "_____no_output_____" ], [ "We can use data to try to do better than these heuristics. How much have other Tribeca condos sold for?\n\nLet's answer this question with a relevant dataset, containing most of the single residential unit, elevator apartment condos sold in Tribeca, from January through April 2019.\n\nWe can get descriptive statistics for the dataset's `SALE_PRICE` column.\n\nHow many condo sales are in this dataset? What was the average sale price? The median? Minimum? Maximum?", "_____no_output_____" ] ], [ [ "import pandas as pd\ndf = pd.read_csv(DATA_PATH+'condos/tribeca.csv')", "_____no_output_____" ], [ "pd.options.display.float_format = '{:,.0f}'.format\ndf['SALE_PRICE'].describe()", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nimport seaborn as sns\n\nsns.distplot(df['SALE_PRICE'], kde=False);\nplt.axvline(df['SALE_PRICE'].mean(), color='blue')\nplt.axvline(df['SALE_PRICE'].median(), color='red')", "_____no_output_____" ] ], [ [ "On average, condos in Tribeca have sold for \\$3.9 million. So that could be a reasonable first guess.\n\nIn fact, here's the interesting thing: **we could use this one number as a \"prediction\", if we didn't have any data except for sales price...** \n\nImagine we didn't have any other information about condos, then what would you tell somebody? If you had some sales prices like this but you didn't have any of these other columns. If somebody asked you, \"How much do you think a condo in Tribeca costs?\"\n\nYou could say, \"Well, I've got 90 sales prices here, and I see that on average they cost \\$3.9 million.\"\n\nSo we do this all the time in the real world. We use descriptive statistics for prediction. 
And that's not wrong or bad, in fact **that's where you should start. This is called the _mean baseline_.**", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ], [ [ "**Baseline** is an overloaded term, with multiple meanings:\n\n1. [**The score you'd get by guessing**](https://twitter.com/koehrsen_will/status/1088863527778111488)\n2. [**Fast, first models that beat guessing**](https://blog.insightdatascience.com/always-start-with-a-stupid-model-no-exceptions-3a22314b9aaa) \n3. **Complete, tuned \"simpler\" model** (Simpler mathematically, computationally. Or less work for you, the data scientist.)\n4. **Minimum performance that \"matters\"** to go to production and benefit your employer and the people you serve.\n5. **Human-level performance** \n\nBaseline type #1 is what we're doing now.\n\n(Linear models can be great for #2, 3, 4, and [sometimes even #5 too!](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.188.5825))", "_____no_output_____" ], [ "---\n\nLet's go back to our mean baseline for Tribeca condos. \n\nIf we just guessed that every Tribeca condo sold for \\$3.9 million, how far off would we be, on average?", "_____no_output_____" ] ], [ [ "guess = df['SALE_PRICE'].mean()\n# guess = 15000000", "_____no_output_____" ], [ "errors = guess - df['SALE_PRICE']", "_____no_output_____" ], [ "mean_absolute_error = errors.abs().mean()", "_____no_output_____" ], [ "print(f'If we just guessed every Tribeca condo sold for ${guess:,.0f},')\nprint(f'we would be off by ${mean_absolute_error:,.0f} on average.')", "If we just guessed every Tribeca condo sold for $3,928,736,\nwe would be off by $2,783,380 on average.\n" ] ], [ [ "That sounds like a lot of error! \n\nBut fortunately, we can do better than this first baseline — we can use more data. For example, the condo's size.\n\nCould sale price be **dependent** on square feet? To explore this relationship, let's make a scatterplot, using [Plotly Express](https://plot.ly/python/plotly-express/):", "_____no_output_____" ] ], [ [ "import plotly.express as px\npx.scatter(df, x='GROSS_SQUARE_FEET', y='SALE_PRICE')", "_____no_output_____" ] ], [ [ "### 3. Predictive Model\n\nTo go from a _descriptive_ [scatterplot](https://www.plotly.express/plotly_express/#plotly_express.scatter) to a _predictive_ regression, just add a _line of best fit:_", "_____no_output_____" ] ], [ [ "px.scatter(df, x='GROSS_SQUARE_FEET', y='SALE_PRICE', trendline='ols')", "_____no_output_____" ] ], [ [ "Roll over the Plotly regression line to see its equation and predictions for sale price, dependent on gross square feet.\n\nLinear Regression helps us **interpolate.** For example, in this dataset, there's a gap between 4016 sq ft and 4663 sq ft. 
There were no 4300 sq ft condos sold, but what price would you predict, using this line of best fit?\n\nLinear Regression also helps us **extrapolate.** For example, in this dataset, there were no 6000 sq ft condos sold, but what price would you predict?", "_____no_output_____" ], [ "The line of best fit tries to summarize the relationship between our x variable and y variable in a way that enables us to use the equation for that line to make predictions.\n\n\n\n", "_____no_output_____" ], [ "**Synonyms for \"y variable\"**\n\n- **Dependent Variable**\n- Response Variable\n- Outcome Variable \n- Predicted Variable\n- Measured Variable\n- Explained Variable\n- **Label**\n- **Target**", "_____no_output_____" ], [ "**Synonyms for \"x variable\"**\n\n- **Independent Variable**\n- Explanatory Variable\n- Regressor\n- Covariate\n- Correlate\n- **Feature**\n", "_____no_output_____" ], [ "The bolded terminology will be used most often by your instructors this unit.", "_____no_output_____" ], [ "## Challenge\n\nIn your assignment, you will practice how to begin with baselines for regression, using a new dataset!", "_____no_output_____" ], [ "# Use scikit-learn to fit a linear regression", "_____no_output_____" ], [ "## Overview", "_____no_output_____" ], [ "We can use visualization libraries to do simple linear regression (\"simple\" means there's only one independent variable). \n\nBut during this unit, we'll usually use the scikit-learn library for predictive models, and we'll usually have multiple independent variables.", "_____no_output_____" ], [ "In [_Python Data Science Handbook,_ Chapter 5.2: Introducing Scikit-Learn](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.html#Basics-of-the-API), Jake VanderPlas explains **how to structure your data** for scikit-learn:\n\n> The best way to think about data within Scikit-Learn is in terms of tables of data. \n>\n> ![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.02-samples-features.png)\n>\n>The features matrix is often stored in a variable named `X`. The features matrix is assumed to be two-dimensional, with shape `[n_samples, n_features]`, and is most often contained in a NumPy array or a Pandas `DataFrame`.\n>\n>We also generally work with a label or target array, which by convention we will usually call `y`. The target array is usually one dimensional, with length `n_samples`, and is generally contained in a NumPy array or Pandas `Series`. The target array may have continuous numerical values, or discrete classes/labels. \n>\n>The target array is the quantity we want to _predict from the data:_ in statistical terms, it is the dependent variable. ", "_____no_output_____" ], [ "VanderPlas also lists a **5 step process** for scikit-learn's \"Estimator API\":\n\n> Every machine learning algorithm in Scikit-Learn is implemented via the Estimator API, which provides a consistent interface for a wide range of machine learning applications.\n>\n> Most commonly, the steps in using the Scikit-Learn estimator API are as follows:\n>\n> 1. Choose a class of model by importing the appropriate estimator class from Scikit-Learn.\n> 2. Choose model hyperparameters by instantiating this class with desired values.\n> 3. Arrange data into a features matrix and target vector following the discussion above.\n> 4. Fit the model to your data by calling the `fit()` method of the model instance.\n> 5. 
Apply the Model to new data: For supervised learning, often we predict labels for unknown data using the `predict()` method.\n\nLet's try it!", "_____no_output_____" ], [ "## Follow Along\n\nFollow the 5 step process, and refer to [Scikit-Learn LinearRegression documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).", "_____no_output_____" ] ], [ [ "# 1. Import the appropriate estimator class from Scikit-Learn\n\nfrom sklearn.linear_model import LinearRegression", "_____no_output_____" ], [ "# 2. Instantiate this class\n\nmodel = LinearRegression()", "_____no_output_____" ], [ "# 3. Arrange X features matrix & y target vector\n\nfeatures = ['GROSS_SQUARE_FEET', 'YEAR_BUILT']\n# features = ['GROSS_SQUARE_FEET']\n\ntarget = ['SALE_PRICE']\n\nx_train = df[features]\ny_train = df[target]\n\nx_train.shape, y_train.shape", "_____no_output_____" ], [ "x_train", "_____no_output_____" ], [ "# 4. Fit the model\n\nmodel.fit(x_train, y_train)", "_____no_output_____" ], [ "y_train", "_____no_output_____" ], [ "# 5. Apply the model to new data\n\nsquare_feet = 1497\nyear_built = 1852\n\nx_test = [[ square_feet ]]\n\ny_pred = model.predict(x_test)\ny_pred", "_____no_output_____" ] ], [ [ "So, we used scikit-learn to fit a linear regression, and predicted the sales price for a 1,497 square foot Tribeca condo, like the one from the video.\n\nNow, what did that condo actually sell for? ___The final answer is revealed in [the video at 12:28](https://youtu.be/JQCctBOgH9I?t=748)!___", "_____no_output_____" ] ], [ [ "y_test = [2800000]", "_____no_output_____" ] ], [ [ "What was the error for our prediction, versus the video participants?\n\nLet's use [scikit-learn's mean absolute error function](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_absolute_error.html).", "_____no_output_____" ] ], [ [ "chinwe_final_guess = [15000000]\nmubeen_final_guess = [2200000]\npam_final_guess = [2200000]", "_____no_output_____" ], [ "from sklearn.metrics import mean_absolute_error\n\nmae = mean_absolute_error(y_test, y_pred)\nprint (\"Mean Absolute Error of our model\", mae)", "Mean Absolute Error of our model 300078.0993036949\n" ] ], [ [ "This [diagram](https://ogrisel.github.io/scikit-learn.org/sklearn-tutorial/tutorial/text_analytics/general_concepts.html#supervised-learning-model-fit-x-y) shows what we just did! Don't worry about understanding it all now. But can you start to match some of these boxes/arrows to the corresponding lines of code from above?\n\n<img src=\"https://ogrisel.github.io/scikit-learn.org/sklearn-tutorial/_images/plot_ML_flow_chart_12.png\" width=\"75%\">", "_____no_output_____" ], [ "Here's [another diagram](https://livebook.manning.com/book/deep-learning-with-python/chapter-1/), which shows how machine learning is a \"new programming paradigm\":\n\n<img src=\"https://pbs.twimg.com/media/ECQDlFOWkAEJzlY.jpg\" width=\"70%\">\n\n> A machine learning system is \"trained\" rather than explicitly programmed. It is presented with many \"examples\" relevant to a task, and it finds statistical structure in these examples which eventually allows the system to come up with rules for automating the task. —[Francois Chollet](https://livebook.manning.com/book/deep-learning-with-python/chapter-1/)", "_____no_output_____" ], [ "Wait, are we saying that *linear regression* could be considered a *machine learning algorithm*? Maybe it depends? What do you think? 
We'll discuss throughout this unit.", "_____no_output_____" ], [ "## Challenge\n\nIn your assignment, you will use scikit-learn for linear regression with one feature. For a stretch goal, you can do linear regression with two or more features.", "_____no_output_____" ], [ "# Explain the coefficients from a linear regression", "_____no_output_____" ], [ "## Overview\n\nWhat pattern did the model \"learn\", about the relationship between square feet & price?", "_____no_output_____" ], [ "## Follow Along", "_____no_output_____" ], [ "To help answer this question, we'll look at the `coef_` and `intercept_` attributes of the `LinearRegression` object. (Again, [here's the documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).)\n", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ], [ [ "We can repeatedly apply the model to new/unknown data, and explain the coefficient:", "_____no_output_____" ] ], [ [ "def predict(square_feet):\n y_pred = model.predict([[square_feet]])\n estimate = y_pred[0]\n coefficient = model.coef_[0]\n print ('Estimated price for', square_feet, 'square feet is', estimate)\n print ('The coefficient (cost per square foot) is', coefficient)\n # result = f'${estimate:,.0f} estimated price for {square_feet:,.0f} square foot condo in Tribeca.'\n # explanation = f'In this linear regression, each additional square foot adds ${coefficient:,.0f}.'\n # return result + '\\n' + explanation\n\nprint(predict(1497));", "Estimated price for 1497 square feet is [3100078.09930369]\nThe coefficient (cost per square foot) is [3076.44765542]\nNone\n" ], [ "# What does the model predict for low square footage?\nprint(predict(500))", "Estimated price for 500 is [32859.78685095]\nNone\n" ], [ "# For high square footage?\nprint(predict(10000))", "Estimated price for 10000 is [29259112.51333147]\nNone\n" ], [ "", "_____no_output_____" ] ], [ [ "## Challenge\n\nIn your assignment, you will define a function to make new predictions and explain the model coefficient.", "_____no_output_____" ], [ "# Review", "_____no_output_____" ], [ "You'll practice these objectives when you do your assignment:\n\n- Begin with baselines for regression\n- Use scikit-learn to fit a linear regression\n- Make new predictions and explain coefficients", "_____no_output_____" ], [ "You'll use another New York City real estate dataset. You'll predict how much it costs to rent an apartment, instead of how much it costs to buy a condo.\n\nYou've been provided with a separate notebook for your assignment, which has all the instructions and stretch goals. Good luck and have fun!", "_____no_output_____" ], [ "# Sources\n\n#### NYC Real Estate\n- Video: [Amateurs & Experts Guess How Much a NYC Condo With a Private Terrace Costs](https://www.youtube.com/watch?v=JQCctBOgH9I)\n- Data: [NYC OpenData: NYC Citywide Rolling Calendar Sales](https://data.cityofnewyork.us/dataset/NYC-Citywide-Rolling-Calendar-Sales/usep-8jbt)\n- Glossary: [NYC Department of Finance: Rolling Sales Data](https://www1.nyc.gov/site/finance/taxes/property-rolling-sales-data.page)\n\n#### Baselines\n- Will Koehrsen, [\"One of the most important steps in a machine learning project is establishing a common sense baseline...\"](https://twitter.com/koehrsen_will/status/1088863527778111488)\n- Emmanuel Ameisen, [Always start with a stupid model, no exceptions](https://blog.insightdatascience.com/always-start-with-a-stupid-model-no-exceptions-3a22314b9aaa)\n- Robyn M. 
Dawes, [The robust beauty of improper linear models in decision making](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.188.5825)\n\n#### Plotly Express\n- [Plotly Express](https://plot.ly/python/plotly-express/) examples\n- [plotly_express.scatter](https://www.plotly.express/plotly_express/#plotly_express.scatter) docs\n\n#### Scikit-Learn\n- Francois Chollet, [Diagram](https://livebook.manning.com/book/deep-learning-with-python/chapter-1/)\n- Jake VanderPlas, [_Python Data Science Handbook,_ Chapter 5.2: Introducing Scikit-Learn](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.html#Basics-of-the-API)\n- Olivier Grisel, [Diagram](https://ogrisel.github.io/scikit-learn.org/sklearn-tutorial/tutorial/text_analytics/general_concepts.html#supervised-learning-model-fit-x-y)\n- [sklearn.linear_model.LinearRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html)\n- [sklearn.metrics.mean_absolute_error](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_absolute_error.html)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
4a7d0f0ce6102b8a63997e885e5bb05044ac51aa
497,848
ipynb
Jupyter Notebook
notebook/tutorial_02_display_gaia_edr3_sources.ipynb
xr0038/jasmine_warpfield
d3dc8306c30c955eea997e7cb69c1910df6a9515
[ "MIT" ]
null
null
null
notebook/tutorial_02_display_gaia_edr3_sources.ipynb
xr0038/jasmine_warpfield
d3dc8306c30c955eea997e7cb69c1910df6a9515
[ "MIT" ]
7
2021-07-04T07:07:34.000Z
2021-09-09T05:22:09.000Z
notebook/tutorial_02_display_gaia_edr3_sources.ipynb
xr0038/jasmine_warpfield
d3dc8306c30c955eea997e7cb69c1910df6a9515
[ "MIT" ]
null
null
null
2,928.517647
249,444
0.966769
[ [ [ "# Tutorial: display Gaia EDR3 sources\n\nThis notebook demonstrates that Gaia EDR3 sources are shown on the sky.", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport pickle as pkl\nimport warpfield as w", "_____no_output_____" ], [ "from astropy.coordinates import SkyCoord, Longitude, Latitude\nimport astropy.units as u", "_____no_output_____" ], [ "lon = Longitude(269.267, unit=u.degree)\nlat = Latitude(-18.985, unit=u.degree)\npointing = SkyCoord(lon, lat, frame='icrs')", "_____no_output_____" ] ], [ [ "The retrieved Gaia EDR3 data are loaded from the pickled file.", "_____no_output_____" ] ], [ [ "with open('gaia_edr3.pkl','rb') as f:\n gaia_sources = pkl.load(f)", "_____no_output_____" ], [ "print(gaia_sources)", "<SkyCoord (ICRS): (ra, dec, distance) in (deg, deg, pc)\n [(269.98167082, -18.57869516, 1224.69071274),\n (269.97802152, -18.57559889, 324.47533736),\n (269.9835042 , -18.56314468, 1386.3884802 ), ...,\n (268.95304902, -19.79798206, 1453.99297353),\n (268.94090109, -19.80146811, 1743.04794159),\n (268.94291844, -19.79621657, 961.61213642)]\n (pm_ra_cosdec, pm_dec) in mas / yr\n [( -3.47320156, -6.59719102), (-21.33238793, 19.44839316),\n ( 2.41198999, -0.88761999), ..., ( -0.14121417, 4.28021171),\n ( -0.65096113, -8.71104282), ( -1.00190969, 0.6318477 )]>\n" ] ], [ [ "Use `display_sources` function. The first argument is the center of the field of view. The second argument is the source list. The x-, and y-coordinates are specified by the coordinate frame of the first argument. Here, `pointing` is defined in the `icrs` frame. The figure is shown in the (RA, Dec) coordinates.", "_____no_output_____" ] ], [ [ "ax = w.display_sources(pointing, gaia_sources, title=\"Gaia EDR3 sources (2016.0) in ICRS fame\")", "_____no_output_____" ] ], [ [ "When the first argument is given in a different frame, the coordinates of the figure are changed accordingly. The `pointing` is converted into the `galactic` frame in the following example. The figure is shown in the (Glon, Glat) coordinates.", "_____no_output_____" ] ], [ [ "ax = w.display_sources(pointing.galactic, gaia_sources, title=\"Gaia EDR3 sources (2016.0) in Galactic frame\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a7d101f912625e78bd78cfb9d5b852abb5082fd
362,101
ipynb
Jupyter Notebook
FE_xVA_code.ipynb
Wee7/FinancialEngineering_IR_xVA
cb29982b3c6b7c86480a2a9f01326b48c2539e7c
[ "Apache-2.0" ]
null
null
null
FE_xVA_code.ipynb
Wee7/FinancialEngineering_IR_xVA
cb29982b3c6b7c86480a2a9f01326b48c2539e7c
[ "Apache-2.0" ]
null
null
null
FE_xVA_code.ipynb
Wee7/FinancialEngineering_IR_xVA
cb29982b3c6b7c86480a2a9f01326b48c2539e7c
[ "Apache-2.0" ]
null
null
null
1,025.78187
113,726
0.939619
[ [ [ "<a href=\"https://colab.research.google.com/github/Wee7/FinancialEngineering_IR_xVA/blob/main/FE_xVA_code.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Lecture 02- Understanding of Filtrations and Measures", "_____no_output_____" ] ], [ [ "#%% Martingale.py\n\"\"\"\nCreated on July 05 2021\nSimulation of, E(W(t)|F(s)) = W(s) using nested Monte Carlo\n\nThis code is purely educational and comes from \"Financial Engineering\" course by L.A. Grzelak\nThe course is based on the book “Mathematical Modeling and Computation\nin Finance: With Exercises and Python and MATLAB Computer Codes”,\nby C.W. Oosterlee and L.A. Grzelak, World Scientific Publishing Europe Ltd, 2019.\n@author: Lech A. Grzelak\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nt = 10 \ns = 5\nNoOfPaths=1000\nNoOfSteps=10\n\n# First part to caclulate E(W(t)|F(0)) = W(0)=0\ndef martingaleA():\n W_t = np.random.normal(0.0,pow(t,0.5),[NoOfPaths,1])\n E_W_t = np.mean(W_t)\n print(\"mean value equals to: %.2f while the expected value is W(0) =%0.2f \" %(E_W_t,0.0))\n \n# Second part requiring nested Monte Carlo simulation E(W(t)|F(s)) = W(s)\ndef martingaleB(): \n Z = np.random.normal(0.0,1.0,[NoOfPaths,NoOfSteps])\n W = np.zeros([NoOfPaths,NoOfSteps+1])\n \n # time-step from [t0,s]\n dt1 = s / float(NoOfSteps)\n for i in range(0,NoOfSteps):\n # making sure that samples from normal have mean 0 and variance 1\n Z[:,i] = (Z[:,i] - np.mean(Z[:,i])) / np.std(Z[:,i])\n W[:,i+1] = W[:,i] + pow(dt1,0.5)*Z[:,i]\n \n #W_s is the last column of W\n W_s = W[:,-1]\n #for every path W(s) we perform sub-simulation until time t and calculate\n #the expectation\n # time-step from [s,t]\n dt2 = (t-s)/float(NoOfSteps);\n W_t = np.zeros([NoOfPaths,NoOfSteps+1]);\n \n #Store the results\n E_W_t = np.zeros([NoOfPaths])\n Error=[]\n for i in range(0,NoOfPaths):\n #Sub-simulation from time \"s\" until \"t\"\n W_t[:,0] = W_s[i];\n Z = np.random.normal(0.0,1.0,[NoOfPaths,NoOfSteps])\n for j in range(0,NoOfSteps):\n #this is a scaling that ensures that Z has mean 0 and variance 1\n Z[:,j] = (Z[:,j]-np.mean(Z[:,j])) / np.std(Z[:,j]);\n #path simulation, from \"s\" until \"t\"\n W_t[:,j+1] = W_t[:,j] + pow(dt2,0.5)*Z[:,j]; \n \n E_W_t[i]=np.mean(W_t[:,-1])\n Error.append(E_W_t[i]-W_s[i])\n \n #Generate a plot for the first path\n if i==0:\n plt.plot(np.linspace(0,s,NoOfSteps+1),W[0,:])\n for j in range(0,NoOfPaths):\n plt.plot(np.linspace(s,t,NoOfSteps+1),W_t[j,:])\n plt.xlabel(\"time\")\n plt.ylabel(\"W(t)\")\n plt.grid()\n \n print(Error)\n error = np.max(np.abs(E_W_t-W_s))\n print(\"The error is equal to: %.18f\"%(error))\n \nmartingaleB()\n ", "[4.440892098500626e-16, -1.3877787807814457e-17, 0.0, 0.0, 0.0, -2.220446049250313e-16, 0.0, -8.881784197001252e-16, -8.881784197001252e-16, 0.0, 0.0, 0.0, 0.0, 0.0, -4.440892098500626e-16, 0.0, 0.0, 0.0, 0.0, -2.220446049250313e-16, -2.220446049250313e-16, 2.0816681711721685e-17, 0.0, 2.220446049250313e-16, 0.0, -5.551115123125783e-17, 0.0, 0.0, -6.245004513516506e-17, -1.1102230246251565e-16, -4.440892098500626e-16, 0.0, 0.0, 2.220446049250313e-16, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.1102230246251565e-16, -4.440892098500626e-16, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.1102230246251565e-16, -4.440892098500626e-16, 0.0, 0.0, 2.220446049250313e-16, 2.7755575615628914e-17, 0.0, 0.0, 0.0, -1.1102230246251565e-16, 0.0, 4.85722573273506e-17, 8.881784197001252e-16, 0.0, 0.0, 
-1.1102230246251565e-16, -2.220446049250313e-16, 0.0, -1.1102230246251565e-16, 0.0, 5.551115123125783e-17, 0.0, 0.0, 0.0, 0.0, 5.551115123125783e-17, 2.220446049250313e-16, 0.0, 0.0, 4.440892098500626e-16, -1.1102230246251565e-16, 0.0, 0.0, 0.0, 4.440892098500626e-16, -2.7755575615628914e-17, 0.0, 0.0, 0.0, 2.220446049250313e-16, -8.881784197001252e-16, -4.440892098500626e-16, 0.0, 0.0, 0.0, 4.440892098500626e-16, -4.440892098500626e-16, 0.0, -1.3877787807814457e-17, 2.7755575615628914e-17, 0.0, -8.881784197001252e-16, -5.551115123125783e-17, 0.0, 4.440892098500626e-16, 0.0, 0.0, 0.0, 4.440892098500626e-16, 0.0, 0.0, 4.440892098500626e-16, -5.551115123125783e-17, 0.0, 0.0, 0.0, 0.0, 0.0, -5.551115123125783e-17, 0.0, 0.0, 4.440892098500626e-16, 0.0, 0.0, 0.0, 0.0, 0.0, 4.163336342344337e-17, 0.0, 0.0, -2.220446049250313e-16, 0.0, 0.0, -4.440892098500626e-16, 0.0, 0.0, -4.440892098500626e-16, 0.0, -1.1102230246251565e-16, 4.440892098500626e-16, 0.0, 5.551115123125783e-17, 0.0, -1.1102230246251565e-16, 0.0, -2.220446049250313e-16, 0.0, -5.551115123125783e-17, 4.440892098500626e-16, 0.0, 4.440892098500626e-16, 0.0, 0.0, 0.0, 0.0, 2.220446049250313e-16, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.220446049250313e-16, 0.0, -5.551115123125783e-17, 0.0, 2.220446049250313e-16, 0.0, 0.0, -4.440892098500626e-16, -1.1102230246251565e-16, 0.0, 1.1102230246251565e-16, 1.1102230246251565e-16, 0.0, -5.551115123125783e-17, -1.1102230246251565e-16, -1.1102230246251565e-16, 0.0, 0.0, 2.220446049250313e-16, 0.0, 0.0, -2.220446049250313e-16, -8.881784197001252e-16, 4.440892098500626e-16, 2.220446049250313e-16, 0.0, -8.326672684688674e-17, 5.551115123125783e-17, 0.0, 2.220446049250313e-16, 2.220446049250313e-16, 0.0, 0.0, -4.440892098500626e-16, -4.440892098500626e-16, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.1102230246251565e-16, -1.1102230246251565e-16, 0.0, 0.0, -1.1102230246251565e-16, 0.0, 0.0, 2.220446049250313e-16, 0.0, 0.0, 4.440892098500626e-16, 6.938893903907228e-17, -4.440892098500626e-16, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.440892098500626e-16, 0.0, 0.0, 0.0, 0.0, 5.551115123125783e-17, 0.0, 0.0, -2.220446049250313e-16, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.1102230246251565e-16, 0.0, 0.0, -2.220446049250313e-16, 0.0, 0.0, 0.0, 0.0, 0.0, 5.551115123125783e-17, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.440892098500626e-16, 0.0, -4.440892098500626e-16, -4.440892098500626e-16, 0.0, 0.0, -8.881784197001252e-16, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -2.220446049250313e-16, 0.0, 2.7755575615628914e-17, 0.0, -5.551115123125783e-17, 0.0, 0.0, -2.7755575615628914e-17, -3.469446951953614e-17, -2.220446049250313e-16, 0.0, -1.3877787807814457e-17, 4.440892098500626e-16, -5.551115123125783e-17, 0.0, -6.938893903907228e-17, 2.220446049250313e-16, -5.551115123125783e-17, 0.0, 4.440892098500626e-16, -4.440892098500626e-16, 0.0, 8.326672684688674e-17, 2.220446049250313e-16, -2.220446049250313e-16, 0.0, 0.0, 0.0, 0.0, -2.220446049250313e-16, 0.0, 0.0, -2.220446049250313e-16, 0.0, 2.220446049250313e-16, 0.0, -2.220446049250313e-16, 0.0, -2.220446049250313e-16, 0.0, -4.440892098500626e-16, 1.1102230246251565e-16, 0.0, -1.1102230246251565e-16, 0.0, -4.163336342344337e-17, 0.0, 0.0, -2.220446049250313e-16, -4.440892098500626e-16, 0.0, 5.551115123125783e-17, 0.0, 0.0, 0.0, 2.220446049250313e-16, 1.1102230246251565e-16, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.1102230246251565e-16, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.440892098500626e-16, 0.0, 0.0, -5.551115123125783e-17, 0.0, 4.440892098500626e-16, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.440892098500626e-16, 
-4.440892098500626e-16, 0.0, 0.0, 0.0, -2.220446049250313e-16, 1.1102230246251565e-16, 0.0, 0.0, 0.0, 0.0, 0.0, -2.7755575615628914e-17, 0.0, 0.0, -5.551115123125783e-17, 0.0, 0.0, -4.440892098500626e-16, -4.440892098500626e-16, 0.0, 1.1102230246251565e-16, 0.0, 0.0, 0.0, 2.7755575615628914e-17, 0.0, 0.0, 0.0, -1.1102230246251565e-16, -1.1102230246251565e-16, -1.1102230246251565e-16, 2.220446049250313e-16, 0.0, 0.0, -4.440892098500626e-16, 0.0, 0.0, 0.0, 0.0, -2.220446049250313e-16, 0.0, -2.7755575615628914e-17, 4.440892098500626e-16, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.220446049250313e-16, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -5.551115123125783e-17, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.440892098500626e-16, 0.0, 4.440892098500626e-16, -4.440892098500626e-16, 4.440892098500626e-16, -1.1102230246251565e-16, 0.0, 5.551115123125783e-17, 0.0, 5.551115123125783e-17, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.1102230246251565e-16, -3.469446951953614e-17, 0.0, -2.220446049250313e-16, 0.0, -2.220446049250313e-16, -2.220446049250313e-16, 5.551115123125783e-17, -8.881784197001252e-16, 0.0, 0.0, -2.7755575615628914e-17, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -8.881784197001252e-16, -6.938893903907228e-18, 0.0, -8.326672684688674e-17, 0.0, 0.0, 0.0, 0.0, -2.220446049250313e-16, 0.0, 0.0, 0.0, 0.0, 4.440892098500626e-16, 0.0, 0.0, 0.0, 0.0, -2.7755575615628914e-17, -4.440892098500626e-16, 8.881784197001252e-16, -5.551115123125783e-17, 5.551115123125783e-17, -2.220446049250313e-16, 0.0, 0.0, -4.440892098500626e-16, 0.0, 0.0, -2.220446049250313e-16, -4.163336342344337e-17, 4.440892098500626e-16, 0.0, 0.0, 0.0, 0.0, 0.0, -2.220446049250313e-16, 0.0, 5.551115123125783e-17, 0.0, 0.0, -5.551115123125783e-17, -4.163336342344337e-17, 0.0, 1.1102230246251565e-16, 4.440892098500626e-16, 5.551115123125783e-17, 0.0, 0.0, 1.1102230246251565e-16, 0.0, -2.220446049250313e-16, 0.0, 2.220446049250313e-16, 4.440892098500626e-16, -4.440892098500626e-16, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -8.326672684688674e-17, 0.0, 0.0, 0.0, 0.0, 5.551115123125783e-17, 1.1102230246251565e-16, -5.551115123125783e-17, 1.1102230246251565e-16, 0.0, 0.0, 0.0, -4.163336342344337e-17, -4.440892098500626e-16, -4.440892098500626e-16, 0.0, 0.0, -1.1102230246251565e-16, -1.1102230246251565e-16, 0.0, 0.0, 0.0, 1.1102230246251565e-16, 0.0, 5.551115123125783e-17, 0.0, 5.551115123125783e-17, 0.0, 0.0, 0.0, -6.418476861114186e-17, -1.1102230246251565e-16, 1.1102230246251565e-16, 1.1102230246251565e-16, 0.0, -4.440892098500626e-16, -2.220446049250313e-16, -1.1102230246251565e-16, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 8.881784197001252e-16, 0.0, 4.440892098500626e-16, 0.0, 0.0, 0.0, 2.220446049250313e-16, 0.0, 0.0, -8.881784197001252e-16, -2.220446049250313e-16, 0.0, 0.0, 4.440892098500626e-16, 0.0, 0.0, 8.881784197001252e-16, -5.551115123125783e-17, 0.0, 0.0, 4.440892098500626e-16, 0.0, 5.551115123125783e-17, 0.0, 0.0, -2.220446049250313e-16, -2.7755575615628914e-17, -2.220446049250313e-16, 0.0, 2.7755575615628914e-17, 0.0, 0.0, 0.0, 2.220446049250313e-16, 0.0, 5.551115123125783e-17, -2.220446049250313e-16, 0.0, 5.551115123125783e-17, 3.469446951953614e-17, 0.0, -4.440892098500626e-16, -1.1102230246251565e-16, -2.220446049250313e-16, 0.0, 0.0, 0.0, 0.0, 0.0, 1.1102230246251565e-16, 0.0, 1.1102230246251565e-16, 0.0, 0.0, -8.881784197001252e-16, 0.0, 0.0, 0.0, -1.1102230246251565e-16, 1.1102230246251565e-16, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 8.881784197001252e-16, 1.1102230246251565e-16, 0.0, 0.0, 4.5102810375396984e-17, 0.0, 0.0, -4.440892098500626e-16, 
2.0816681711721685e-17, 0.0, 0.0, 0.0, 5.551115123125783e-17, -2.220446049250313e-16, 0.0, 0.0, 2.7755575615628914e-17, 0.0, 0.0, 4.163336342344337e-17, 0.0, 0.0, 0.0, -4.440892098500626e-16, 0.0, 8.881784197001252e-16, 0.0, 8.881784197001252e-16, 0.0, -1.1102230246251565e-16, 1.1102230246251565e-16, 5.551115123125783e-17, 0.0, 0.0, -5.551115123125783e-17, 0.0, 0.0, 0.0, -1.1102230246251565e-16, 0.0, 0.0, 0.0, -1.1102230246251565e-16, -2.220446049250313e-16, 0.0, 1.1102230246251565e-16, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -5.551115123125783e-17, 0.0, 0.0, 4.440892098500626e-16, 0.0, 0.0, -2.7755575615628914e-17, -4.440892098500626e-16, -1.1102230246251565e-16, 5.551115123125783e-17, 5.551115123125783e-17, 0.0, 0.0, 2.220446049250313e-16, 1.1102230246251565e-16, -5.551115123125783e-17, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.440892098500626e-16, -2.220446049250313e-16, 0.0, 1.1102230246251565e-16, 0.0, 0.0, 2.220446049250313e-16, 0.0, 0.0, 5.551115123125783e-17, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.440892098500626e-16, 0.0, 2.220446049250313e-16, 0.0, 4.440892098500626e-16, -4.440892098500626e-16, 0.0, -4.440892098500626e-16, 4.440892098500626e-16, 0.0, 4.440892098500626e-16, -4.440892098500626e-16, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -2.220446049250313e-16, 0.0, -8.326672684688674e-17, 0.0, 0.0, -2.220446049250313e-16, 0.0, -2.220446049250313e-16, 0.0, -5.551115123125783e-17, -2.220446049250313e-16, 0.0, 0.0, -4.440892098500626e-16, 5.551115123125783e-17, 0.0, -2.220446049250313e-16, 0.0, 0.0, -7.979727989493313e-17, 0.0, 4.440892098500626e-16, 0.0, 5.551115123125783e-17, 0.0, 0.0, 0.0, 1.1102230246251565e-16, 0.0, 2.220446049250313e-16, 0.0, 2.220446049250313e-16, -5.551115123125783e-17, 4.440892098500626e-16, -1.3877787807814457e-17, 0.0, 4.440892098500626e-16, -2.220446049250313e-16, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.440892098500626e-16, 5.551115123125783e-17, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -2.7755575615628914e-17, -5.551115123125783e-17, 0.0, 4.440892098500626e-16, 0.0, 8.881784197001252e-16, 2.7755575615628914e-17, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.551115123125783e-17, 0.0, 0.0, 0.0, 0.0, -5.551115123125783e-17, 4.440892098500626e-16, 0.0, 0.0, 5.551115123125783e-17, 0.0, 4.440892098500626e-16, 1.1102230246251565e-16, -2.220446049250313e-16, 0.0, 0.0, -2.220446049250313e-16, -4.440892098500626e-16, 4.440892098500626e-16, 0.0, -4.440892098500626e-16, 5.551115123125783e-17, 0.0, 0.0, 0.0, -4.440892098500626e-16, -1.1102230246251565e-16, -8.881784197001252e-16, -1.1102230246251565e-16, 0.0, 0.0, 2.220446049250313e-16, 0.0, 5.551115123125783e-17, 0.0, 0.0, 0.0, -4.440892098500626e-16, 5.551115123125783e-17, 5.551115123125783e-17, -5.551115123125783e-17, 0.0, -2.7755575615628914e-17, 0.0, 0.0, -1.1102230246251565e-16, 5.551115123125783e-17, 0.0, 5.551115123125783e-17, -2.220446049250313e-16, 0.0, 0.0, 0.0, 0.0, 1.1102230246251565e-16, 4.163336342344337e-17, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.1102230246251565e-16, 0.0, -1.1102230246251565e-16, 0.0, 8.326672684688674e-17, 0.0, 0.0, 0.0, 8.881784197001252e-16, -8.881784197001252e-16, 0.0, -5.551115123125783e-17, 0.0, 0.0, 5.551115123125783e-17, 0.0, -4.440892098500626e-16, 2.220446049250313e-16, 0.0, 0.0, 0.0, 5.551115123125783e-17, 0.0, 0.0, 0.0, -2.7755575615628914e-17, -5.551115123125783e-17, 0.0, 2.220446049250313e-16, 0.0, 0.0, 0.0, -2.7755575615628914e-17, 1.1102230246251565e-16, -5.551115123125783e-17, 0.0, -5.551115123125783e-17, 0.0, -4.440892098500626e-16, 0.0, -2.220446049250313e-16, 0.0, -1.1102230246251565e-16, 
4.163336342344337e-17, -2.7755575615628914e-17, 0.0, 0.0, 2.220446049250313e-16, -2.220446049250313e-16, 2.220446049250313e-16, 4.440892098500626e-16, 0.0, 0.0, 0.0, 2.7755575615628914e-17, 2.220446049250313e-16, 0.0, 0.0, 0.0, -5.551115123125783e-17, 0.0, 0.0, 4.440892098500626e-16, -2.220446049250313e-16, -4.440892098500626e-16, 0.0, 0.0, 0.0, 5.551115123125783e-17, 5.551115123125783e-17, 0.0, 0.0, 0.0, 0.0, 0.0, -2.220446049250313e-16, -2.7755575615628914e-17, 0.0, 2.7755575615628914e-17, -4.440892098500626e-16, -2.220446049250313e-16, 1.1102230246251565e-16, 0.0, 0.0, 0.0, 0.0, 0.0, 4.440892098500626e-16, 0.0, 0.0, -2.220446049250313e-16, 0.0, -4.440892098500626e-16, 8.881784197001252e-16, -2.7755575615628914e-17, -5.551115123125783e-17, 2.220446049250313e-16, 2.220446049250313e-16, 4.440892098500626e-16, 0.0, 4.440892098500626e-16, 0.0, 0.0, 0.0, 4.440892098500626e-16, 0.0, 0.0, 0.0, -1.1102230246251565e-16, 0.0, 0.0, 0.0, 0.0, -5.551115123125783e-17, 0.0, 1.1102230246251565e-16, 0.0, 4.85722573273506e-17, 0.0, 0.0, 0.0, -1.734723475976807e-17, 0.0, 0.0]\nThe error is equal to: 0.000000000000000888\n" ], [ "#%% Black_Scholes_Jumps.py\n\"\"\"\nCreated on July 05 2021\nImpact of conditional expectation pricing (Black-Scholes with Jump volatility)\n\nThis code is purely educational and comes from \"Financial Engineering\" course by L.A. Grzelak\nThe course is based on the book “Mathematical Modeling and Computation\nin Finance: With Exercises and Python and MATLAB Computer Codes”,\nby C.W. Oosterlee and L.A. Grzelak, World Scientific Publishing Europe Ltd, 2019.\n@author: Lech A. Grzelak\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport enum\nimport scipy.stats as st\n\n# This class defines puts and calls\nclass OptionType(enum.Enum):\n CALL = 1.0\n PUT = -1.0\n \ndef GeneratePaths(NoOfPaths,NoOfSteps,S0,T,muJ,sigmaJ,r): \n # Create empty matrices for Poisson process and for compensated Poisson process\n X = np.zeros([NoOfPaths, NoOfSteps+1])\n S = np.zeros([NoOfPaths, NoOfSteps+1])\n time = np.zeros([NoOfSteps+1])\n \n dt = T / float(NoOfSteps)\n X[:,0] = np.log(S0)\n S[:,0] = S0\n \n Z = np.random.normal(0.0,1.0,[NoOfPaths,NoOfSteps])\n J = np.random.normal(muJ,sigmaJ,[NoOfPaths,NoOfSteps])\n for i in range(0,NoOfSteps):\n # making sure that samples from normal have mean 0 and variance 1\n if NoOfPaths > 1:\n Z[:,i] = (Z[:,i] - np.mean(Z[:,i])) / np.std(Z[:,i])\n \n X[:,i+1] = X[:,i] + (r - 0.5*J[:,i]**2.0)*dt +J[:,i]*np.sqrt(dt)* Z[:,i]\n time[i+1] = time[i] +dt\n \n S = np.exp(X)\n paths = {\"time\":time,\"X\":X,\"S\":S,\"J\":J}\n return paths\n\ndef EUOptionPriceFromMCPaths(CP,S,K,T,r):\n # S is a vector of Monte Carlo samples at T\n if CP == OptionType.CALL:\n return np.exp(-r*T)*np.mean(np.maximum(S-K,0.0))\n elif CP == OptionType.PUT:\n return np.exp(-r*T)*np.mean(np.maximum(K-S,0.0))\n\ndef BS_Call_Put_Option_Price(CP,S_0,K,sigma,t,T,r):\n K = np.array(K).reshape([len(K),1])\n d1 = (np.log(S_0 / K) + (r + 0.5 * np.power(sigma,2.0))\n * (T-t)) / (sigma * np.sqrt(T-t))\n d2 = d1 - sigma * np.sqrt(T-t)\n if CP == OptionType.CALL:\n value = st.norm.cdf(d1) * S_0 - st.norm.cdf(d2) * K * np.exp(-r * (T-t))\n elif CP == OptionType.PUT:\n value = st.norm.cdf(-d2) * K * np.exp(-r * (T-t)) - st.norm.cdf(-d1)*S_0\n return value\n\ndef CallOption_CondExpectation(NoOfPaths,T,S0,K,J,r):\n \n # Jumps at time T\n J_i = J[:,-1]\n \n result = np.zeros([NoOfPaths])\n \n for j in range(0,NoOfPaths):\n sigma = J_i[j]\n result[j] = 
BS_Call_Put_Option_Price(OptionType.CALL,S0,[K],sigma,0.0,T,r)\n \n return np.mean(result)\n\ndef mainCalculation():\n NoOfPaths = 25\n NoOfSteps = 500\n T = 5\n muJ = 0.3\n sigmaJ = 0.005\n \n S0 =100\n r =0.00\n Paths = GeneratePaths(NoOfPaths,NoOfSteps,S0, T,muJ,sigmaJ,r)\n timeGrid = Paths[\"time\"]\n X = Paths[\"X\"]\n S = Paths[\"S\"]\n \n plt.figure(1)\n plt.plot(timeGrid, np.transpose(X)) \n plt.grid()\n plt.xlabel(\"time\")\n plt.ylabel(\"X(t)\")\n \n plt.figure(2)\n plt.plot(timeGrid, np.transpose(S)) \n plt.grid()\n plt.xlabel(\"time\")\n plt.ylabel(\"S(t)\")\n \n # Check the convergence for a given strike\n K = 80\n CP =OptionType.CALL\n \n NGrid = range(100,10000,1000)\n NoOfRuns = len(NGrid)\n \n resultMC = np.zeros([NoOfRuns])\n resultCondExp = np.zeros([NoOfRuns])\n \n for (i,N) in enumerate(NGrid):\n print(N)\n Paths = GeneratePaths(N,NoOfSteps,S0, T,muJ,sigmaJ,r)\n timeGrid = Paths[\"time\"]\n S = Paths[\"S\"]\n resultMC[i] = EUOptionPriceFromMCPaths(CP,S[:,-1],K,T,r)\n \n J = Paths[\"J\"]\n\n resultCondExp[i]=CallOption_CondExpectation(NoOfPaths,T,S0,K,J,r)\n \n plt.figure(3)\n plt.plot(NGrid,resultMC) \n plt.plot(NGrid,resultCondExp)\n plt.legend(['MC','Conditional Expectation'])\n plt.title('Call Option Price- Convergence')\n plt.xlabel('Number of Paths')\n plt.ylabel('Option price for a given strike, K')\n plt.grid()\n \nmainCalculation()", "100\n1100\n2100\n3100\n4100\n5100\n6100\n7100\n8100\n9100\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ] ]
4a7d104add8e93de1cd4e234f2aa43c9da7854fd
17,533
ipynb
Jupyter Notebook
ForwardList.ipynb
rambasnet/stl-notebooks
cd9e2a85a5513175fa205cb887b04cf34dd5c07d
[ "MIT" ]
2
2019-01-18T21:36:18.000Z
2019-04-01T20:11:11.000Z
ForwardList.ipynb
rambasnet/CPP-STL
cd9e2a85a5513175fa205cb887b04cf34dd5c07d
[ "MIT" ]
null
null
null
ForwardList.ipynb
rambasnet/CPP-STL
cd9e2a85a5513175fa205cb887b04cf34dd5c07d
[ "MIT" ]
null
null
null
19.83371
112
0.465522
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
4a7d11440070539b979f96361343d79c2e68d339
324,077
ipynb
Jupyter Notebook
Kaggle/KaggleCareerCon2019/Deep Learning Helping Navigate Robots.ipynb
dimitreOliveira/MachineLearning
ae5907ef1bf2620b62a33176fa1ef5208ed3bb91
[ "MIT" ]
13
2019-03-11T17:53:02.000Z
2022-03-10T14:18:17.000Z
Kaggle/KaggleCareerCon2019/Deep Learning Helping Navigate Robots.ipynb
dimitreOliveira/MachineLearning
ae5907ef1bf2620b62a33176fa1ef5208ed3bb91
[ "MIT" ]
1
2019-03-04T14:18:13.000Z
2019-03-05T20:56:22.000Z
Kaggle/KaggleCareerCon2019/Deep Learning Helping Navigate Robots.ipynb
dimitreOliveira/MachineLearning
ae5907ef1bf2620b62a33176fa1ef5208ed3bb91
[ "MIT" ]
6
2019-03-01T12:57:43.000Z
2021-12-20T01:04:08.000Z
390.454217
129,897
0.88309
[ [ [ "<h1><center>Deep Learning Helping Navigate Robots</center></h1>\n<img src=\"https://storage.googleapis.com/kaggle-competitions/kaggle/13242/logos/thumb76_76.png?t=2019-03-12-23-33-31\" width=\"300\"></img>\n\n### Dependencies", "_____no_output_____" ] ], [ [ "import warnings\nimport cufflinks\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom keras import optimizers\nfrom keras.layers import Dense\nfrom keras.utils import to_categorical\nfrom keras.models import Sequential, Model\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\n\n%matplotlib inline\nwarnings.filterwarnings(\"ignore\")\ncufflinks.go_offline(connected=True)\n\n# Set seeds to make the experiment more reproducible.\nfrom tensorflow import set_random_seed\nfrom numpy.random import seed\nset_random_seed(0)\nseed(0)", "Using TensorFlow backend.\n" ] ], [ [ "### Load data", "_____no_output_____" ] ], [ [ "train = pd.read_csv('../input/X_train.csv')\nlabels = pd.read_csv('../input/y_train.csv')\ntest = pd.read_csv('../input/X_test.csv')\n\nprint('Train features shape', train.shape)\ndisplay(train.head())\nprint('Train labels shape', labels.shape)\ndisplay(labels.head())\nprint('Test shape', test.shape)\ndisplay(test.head())", "Train features shape (487680, 13)\n" ] ], [ [ "### Join train features with labels", "_____no_output_____" ] ], [ [ "train = train.join(labels, on='series_id', rsuffix='_')\ntrain.drop('series_id_', axis=1, inplace=True)\nprint(train.shape)\ndisplay(train.head())", "(487680, 15)\n" ] ], [ [ "### Plotly graphs may take a while to load.\n\n# EDA\n\n## Surface distribution\n- Let's see what's the label distribution of our data", "_____no_output_____" ] ], [ [ "f, ax = plt.subplots(figsize=(12, 8))\nax = sns.countplot(y='surface', data=train, palette=\"rocket\", order=reversed(train['surface'].value_counts().index))\nax.set_ylabel(\"Surface type\")\nplt.show()", "_____no_output_____" ] ], [ [ "### Surface distribution by \"group_id\"", "_____no_output_____" ] ], [ [ "group_df = train.groupby(['group_id', 'surface'])['surface'].agg({'surface':['count']}).reset_index()\ngroup_df.columns = ['group_id', 'surface', 'count']\nf, ax = plt.subplots(figsize=(18, 8))\nax = sns.barplot(x=\"group_id\", y=\"count\", data=group_df, palette=\"GnBu_d\")\n\nfor index, row in group_df.iterrows():\n ax.text(row.name, row['count'], row['surface'], color='black', ha=\"center\", rotation=60)\n \nplt.show()", "_____no_output_____" ] ], [ [ "## Features distribution\n- Now would be a good idea to see how each other type of features behavior\n\n### Orientation distribution", "_____no_output_____" ] ], [ [ "orientation_features = ['orientation_X', 'orientation_Y', 'orientation_Z', 'orientation_W']\ntrain[orientation_features].iplot(kind='histogram', bins=200, subplots=True, shape=(len(orientation_features), 1))\ntrain[orientation_features].iplot(kind='histogram', barmode='overlay', bins=200)\ntrain[orientation_features].iplot(kind='box')", "_____no_output_____" ] ], [ [ "The interesting part here is that \"orientation_Y\" and \"orientation_X\" are far more spread than the other two.", "_____no_output_____" ], [ "### Angular velocity distribution", "_____no_output_____" ] ], [ [ "velocity_features = ['angular_velocity_X', 'angular_velocity_Y', 'angular_velocity_Z']\ntrain[velocity_features].iplot(kind='histogram', bins=200, subplots=True, shape=(len(velocity_features), 
1))\ntrain[velocity_features].iplot(kind='histogram', barmode='overlay', bins=200)\ntrain[velocity_features].iplot(kind='box')", "_____no_output_____" ] ], [ [ "Here all the angular velocity features seem to be centered around 0, but \"angular_velocity_Y\" is less spread than the others.", "_____no_output_____" ], [ "### Linear acceleration distribution", "_____no_output_____" ] ], [ [ "acceleration_features = ['linear_acceleration_X', 'linear_acceleration_Y', 'linear_acceleration_Z']\ntrain[acceleration_features].iplot(kind='histogram', bins=200, subplots=True, shape=(len(acceleration_features), 1))\ntrain[acceleration_features].iplot(kind='histogram', barmode='overlay', bins=200)\ntrain[acceleration_features].iplot(kind='box')", "_____no_output_____" ] ], [ [ "The linear acceleration features seem to be the most different among themselves; all 3 features have different means and spreads.", "_____no_output_____" ], [ "### Preprocess the labels", "_____no_output_____" ] ], [ [ "target = train['surface']\nn_labels = target.nunique()\nlabels_names = target.unique()\nle = LabelEncoder()\ntarget = le.fit_transform(target.values)\ntarget = to_categorical(target)\ntrain.drop('surface', axis=1, inplace=True)", "_____no_output_____" ] ], [ [ "### Train/validation split", "_____no_output_____" ] ], [ [ "features = ['orientation_X', 'orientation_Y', 'orientation_Z', 'orientation_W', \n            'angular_velocity_X', 'angular_velocity_Y', 'angular_velocity_Z', \n            'linear_acceleration_X', 'linear_acceleration_Y', 'linear_acceleration_Z']\n\nX_train, X_val, Y_train, Y_val = train_test_split(train[features], target, test_size=0.2, random_state=0)\nprint('Train shape', X_train.shape)\nprint('Validation shape', X_val.shape)\ndisplay(X_train.head())", "Train shape (390144, 10)\nValidation shape (97536, 10)\n" ] ], [ [ "### Model", "_____no_output_____" ] ], [ [ "epochs = 70\nbatch = 128\nlr = 0.001\nadam = optimizers.Adam(lr)", "_____no_output_____" ], [ "model = Sequential()\nmodel.add(Dense(20, activation='relu', input_dim=X_train.shape[1]))\nmodel.add(Dense(20, activation='relu'))\nmodel.add(Dense(n_labels, activation=\"softmax\"))\nmodel.compile(loss='categorical_crossentropy', optimizer=adam)\nmodel.summary()", "_________________________________________________________________\nLayer (type)                 Output Shape              Param #   \n=================================================================\ndense_1 (Dense)              (None, 20)                220       \n_________________________________________________________________\ndense_2 (Dense)              (None, 20)                420       \n_________________________________________________________________\ndense_3 (Dense)              (None, 9)                 189       \n=================================================================\nTotal params: 829\nTrainable params: 829\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "history = model.fit(X_train.values, Y_train, validation_data=(X_val.values, Y_val), epochs=epochs, verbose=2)", "Train on 390144 samples, validate on 97536 samples\nEpoch 1/70\n - 41s - loss: 1.6330 - val_loss: 1.4563\nEpoch 2/70\n - 38s - loss: 1.3321 - val_loss: 1.2441\nEpoch 3/70\n - 36s - loss: 1.1980 - val_loss: 1.1487\nEpoch 4/70\n - 37s - loss: 1.1159 - val_loss: 1.0756\nEpoch 5/70\n - 36s - loss: 1.0512 - val_loss: 1.0256\nEpoch 6/70\n - 37s - loss: 0.9879 - val_loss: 0.9491\nEpoch 7/70\n - 36s - loss: 0.9499 - val_loss: 0.9411\nEpoch 8/70\n - 37s - loss: 0.9286 - val_loss: 0.9152\nEpoch 9/70\n - 38s - loss: 0.9113 - val_loss: 0.8973\nEpoch 10/70\n - 38s - loss: 0.8956 - val_loss: 0.8878\nEpoch 
11/70\n - 36s - loss: 0.8822 - val_loss: 0.8704\nEpoch 12/70\n - 37s - loss: 0.8688 - val_loss: 0.8666\nEpoch 13/70\n - 37s - loss: 0.8557 - val_loss: 0.8451\nEpoch 14/70\n - 37s - loss: 0.8427 - val_loss: 0.8238\nEpoch 15/70\n - 38s - loss: 0.8323 - val_loss: 0.8206\nEpoch 16/70\n - 37s - loss: 0.8221 - val_loss: 0.8077\nEpoch 17/70\n - 39s - loss: 0.8150 - val_loss: 0.8090\nEpoch 18/70\n - 37s - loss: 0.8066 - val_loss: 0.7924\nEpoch 19/70\n - 38s - loss: 0.7986 - val_loss: 0.7912\nEpoch 20/70\n - 37s - loss: 0.7931 - val_loss: 0.7917\nEpoch 21/70\n - 38s - loss: 0.7876 - val_loss: 0.7997\nEpoch 22/70\n - 37s - loss: 0.7827 - val_loss: 0.7685\nEpoch 23/70\n - 38s - loss: 0.7777 - val_loss: 0.7840\nEpoch 24/70\n - 37s - loss: 0.7730 - val_loss: 0.7613\nEpoch 25/70\n - 37s - loss: 0.7686 - val_loss: 0.7633\nEpoch 26/70\n - 38s - loss: 0.7647 - val_loss: 0.7691\nEpoch 27/70\n - 37s - loss: 0.7618 - val_loss: 0.7478\nEpoch 28/70\n - 37s - loss: 0.7587 - val_loss: 0.7605\nEpoch 29/70\n - 36s - loss: 0.7552 - val_loss: 0.7404\nEpoch 30/70\n - 37s - loss: 0.7528 - val_loss: 0.7725\nEpoch 31/70\n - 36s - loss: 0.7490 - val_loss: 0.7636\nEpoch 32/70\n - 38s - loss: 0.7445 - val_loss: 0.7454\nEpoch 33/70\n - 36s - loss: 0.7415 - val_loss: 0.7300\nEpoch 34/70\n - 39s - loss: 0.7375 - val_loss: 0.7284\nEpoch 35/70\n - 37s - loss: 0.7336 - val_loss: 0.7323\nEpoch 36/70\n - 37s - loss: 0.7289 - val_loss: 0.7457\nEpoch 37/70\n - 36s - loss: 0.7239 - val_loss: 0.7354\nEpoch 38/70\n - 37s - loss: 0.7162 - val_loss: 0.7158\nEpoch 39/70\n - 36s - loss: 0.7100 - val_loss: 0.6979\nEpoch 40/70\n - 36s - loss: 0.7052 - val_loss: 0.7456\nEpoch 41/70\n - 37s - loss: 0.7003 - val_loss: 0.7053\nEpoch 42/70\n - 38s - loss: 0.6960 - val_loss: 0.6994\nEpoch 43/70\n - 38s - loss: 0.6924 - val_loss: 0.6833\nEpoch 44/70\n - 36s - loss: 0.6890 - val_loss: 0.6837\nEpoch 45/70\n - 37s - loss: 0.6855 - val_loss: 0.6715\nEpoch 46/70\n - 36s - loss: 0.6828 - val_loss: 0.6771\nEpoch 47/70\n - 37s - loss: 0.6807 - val_loss: 0.6788\nEpoch 48/70\n - 36s - loss: 0.6788 - val_loss: 0.7246\nEpoch 49/70\n - 37s - loss: 0.6763 - val_loss: 0.6824\nEpoch 50/70\n - 36s - loss: 0.6738 - val_loss: 0.6798\nEpoch 51/70\n - 38s - loss: 0.6729 - val_loss: 0.6687\nEpoch 52/70\n - 37s - loss: 0.6706 - val_loss: 0.6588\nEpoch 53/70\n - 36s - loss: 0.6691 - val_loss: 0.6809\nEpoch 54/70\n - 37s - loss: 0.6677 - val_loss: 0.6554\nEpoch 55/70\n - 37s - loss: 0.6657 - val_loss: 0.6554\nEpoch 56/70\n - 37s - loss: 0.6633 - val_loss: 0.6449\nEpoch 57/70\n - 36s - loss: 0.6617 - val_loss: 0.6362\nEpoch 58/70\n - 37s - loss: 0.6606 - val_loss: 0.6590\nEpoch 59/70\n - 38s - loss: 0.6587 - val_loss: 0.6568\nEpoch 60/70\n - 37s - loss: 0.6561 - val_loss: 0.6548\nEpoch 61/70\n - 36s - loss: 0.6547 - val_loss: 0.6391\nEpoch 62/70\n - 37s - loss: 0.6525 - val_loss: 0.6662\nEpoch 63/70\n - 37s - loss: 0.6512 - val_loss: 0.6475\nEpoch 64/70\n - 36s - loss: 0.6488 - val_loss: 0.6333\nEpoch 65/70\n - 37s - loss: 0.6476 - val_loss: 0.6563\nEpoch 66/70\n - 36s - loss: 0.6458 - val_loss: 0.6355\nEpoch 67/70\n - 38s - loss: 0.6435 - val_loss: 0.6225\nEpoch 68/70\n - 37s - loss: 0.6417 - val_loss: 0.6233\nEpoch 69/70\n - 37s - loss: 0.6400 - val_loss: 0.6367\nEpoch 70/70\n - 36s - loss: 0.6388 - val_loss: 0.6469\n" ] ], [ [ "#### Model loss plot", "_____no_output_____" ] ], [ [ "history_pd = pd.DataFrame.from_dict(history.history)\nhistory_pd.iplot(kind='line')", "_____no_output_____" ] ], [ [ "#### Model confusion matrix", "_____no_output_____" ] ], [ [ "cnf_matrix = 
confusion_matrix(np.argmax(Y_train, axis=1), model.predict_classes(X_train))\ncnf_matrix_norm = cnf_matrix.astype('float') / cnf_matrix.sum(axis=1)[:, np.newaxis]\ndf_cm = pd.DataFrame(cnf_matrix_norm, index=labels_names, columns=labels_names)\n\nplt.figure(figsize=(20, 7))\nax = plt.axes()\nax.set_title('Train')\nsns.heatmap(df_cm, annot=True, fmt='.2f', cmap=\"Blues\", ax=ax)\nplt.show()\n\ncnf_matrix = confusion_matrix(np.argmax(Y_val, axis=1), model.predict_classes(X_val))\ncnf_matrix_norm = cnf_matrix.astype('float') / cnf_matrix.sum(axis=1)[:, np.newaxis]\ndf_cm = pd.DataFrame(cnf_matrix_norm, index=labels_names, columns=labels_names)\n\nplt.figure(figsize=(20, 7))\nax = plt.axes()\nax.set_title('Validation')\nsns.heatmap(df_cm, annot=True, fmt='.2f', cmap=\"Blues\", ax=ax)\nplt.show()", "_____no_output_____" ] ], [ [ "### Test predictions", "_____no_output_____" ] ], [ [ "predictions = model.predict_classes(test[features].values)\ntest['surface'] = le.inverse_transform(predictions)\ndf = test[['series_id', 'surface']]\ndf = df.groupby('series_id', as_index=False).agg(lambda x:x.value_counts().index[0])\ndf.to_csv('submission.csv', index=False)\ndf.head(10)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
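Annotation: the record above collapses per-window predictions into one label per `series_id` with `value_counts().index[0]`. A minimal sketch of the same majority vote written with `mode()`; the toy `pred` frame below is hypothetical, only the column names follow the record:

```python
import pandas as pd

# Hypothetical per-window predictions for three series (same columns as the record above).
pred = pd.DataFrame({
    'series_id': [0, 0, 0, 1, 1, 2],
    'surface':   ['tiled', 'tiled', 'wood', 'carpet', 'carpet', 'wood'],
})

# Majority vote per series: mode() returns the most frequent label(s);
# iloc[0] breaks ties deterministically, much like value_counts().index[0] above.
submission = (pred.groupby('series_id')['surface']
                  .agg(lambda s: s.mode().iloc[0])
                  .reset_index())
print(submission)
```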
4a7d17769f3956bdbb04a194703b29600f406b13
46,144
ipynb
Jupyter Notebook
tv-script-generation/dlnd_tv_script_generation.ipynb
jmvargas/deep-learning
7d9bd223c05908fec0c760a1f8366bb2d85075ff
[ "MIT" ]
null
null
null
tv-script-generation/dlnd_tv_script_generation.ipynb
jmvargas/deep-learning
7d9bd223c05908fec0c760a1f8366bb2d85075ff
[ "MIT" ]
null
null
null
tv-script-generation/dlnd_tv_script_generation.ipynb
jmvargas/deep-learning
7d9bd223c05908fec0c760a1f8366bb2d85075ff
[ "MIT" ]
null
null
null
34.616654
556
0.56237
[ [ [ "# TV Script Generation\nIn this project, you'll generate your own [Simpsons](https://en.wikipedia.org/wiki/The_Simpsons) TV scripts using RNNs. You'll be using part of the [Simpsons dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data) of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at [Moe's Tavern](https://simpsonswiki.com/wiki/Moe's_Tavern).\n## Get the Data\nThe data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like \"Moe's Cavern\", \"Flaming Moe's\", \"Uncle Moe's Family Feed-Bag\", etc..", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport helper\n\ndata_dir = './data/simpsons/moes_tavern_lines.txt'\ntext = helper.load_data(data_dir)\n# Ignore notice, since we don't use it for analysing the data\ntext = text[81:]", "_____no_output_____" ] ], [ [ "## Explore the Data\nPlay around with `view_sentence_range` to view different parts of the data.", "_____no_output_____" ] ], [ [ "view_sentence_range = (0, 10)\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport numpy as np\n\nprint('Dataset Stats')\nprint('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))\nscenes = text.split('\\n\\n')\nprint('Number of scenes: {}'.format(len(scenes)))\nsentence_count_scene = [scene.count('\\n') for scene in scenes]\nprint('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))\n\nsentences = [sentence for scene in scenes for sentence in scene.split('\\n')]\nprint('Number of lines: {}'.format(len(sentences)))\nword_count_sentence = [len(sentence.split()) for sentence in sentences]\nprint('Average number of words in each line: {}'.format(np.average(word_count_sentence)))\n\nprint()\nprint('The sentences {} to {}:'.format(*view_sentence_range))\nprint('\\n'.join(text.split('\\n')[view_sentence_range[0]:view_sentence_range[1]]))", "Dataset Stats\nRoughly the number of unique words: 11492\nNumber of scenes: 262\nAverage number of sentences in each scene: 15.248091603053435\nNumber of lines: 4257\nAverage number of words in each line: 11.50434578341555\n\nThe sentences 0 to 10:\nMoe_Szyslak: (INTO PHONE) Moe's Tavern. Where the elite meet to drink.\nBart_Simpson: Eh, yeah, hello, is Mike there? Last name, Rotch.\nMoe_Szyslak: (INTO PHONE) Hold on, I'll check. (TO BARFLIES) Mike Rotch. Mike Rotch. Hey, has anybody seen Mike Rotch, lately?\nMoe_Szyslak: (INTO PHONE) Listen you little puke. One of these days I'm gonna catch you, and I'm gonna carve my name on your back with an ice pick.\nMoe_Szyslak: What's the matter Homer? You're not your normal effervescent self.\nHomer_Simpson: I got my problems, Moe. Give me another one.\nMoe_Szyslak: Homer, hey, you should not drink to forget your problems.\nBarney_Gumble: Yeah, you should only drink to enhance your social skills.\n\n\n" ] ], [ [ "## Implement Preprocessing Functions\nThe first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:\n- Lookup Table\n- Tokenize Punctuation\n\n### Lookup Table\nTo create a word embedding, you first need to transform the words to ids. 
In this function, create two dictionaries:\n- Dictionary to go from the words to an id, we'll call `vocab_to_int`\n- Dictionary to go from the id to word, we'll call `int_to_vocab`\n\nReturn these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`", "_____no_output_____" ] ], [ [ "import numpy as np\nimport problem_unittests as tests\nfrom collections import Counter\n\ndef create_lookup_tables(text):\n \"\"\"\n Create lookup tables for vocabulary\n :param text: The text of tv scripts split into words\n :return: A tuple of dicts (vocab_to_int, int_to_vocab)\n \"\"\"\n # TODO: Implement Function\n word_counts = Counter(text)\n sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True)\n int_to_vocab = {ii: word for ii, word in enumerate(sorted_vocab)}\n vocab_to_int = {word: ii for ii, word in int_to_vocab.items()}\n return vocab_to_int, int_to_vocab\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_create_lookup_tables(create_lookup_tables)", "Tests Passed\n" ] ], [ [ "### Tokenize Punctuation\nWe'll be splitting the script into a word array using spaces as delimiters. However, punctuation like periods and exclamation marks makes it hard for the neural network to distinguish between the word \"bye\" and \"bye!\".\n\nImplement the function `token_lookup` to return a dict that will be used to tokenize symbols like \"!\" into \"||Exclamation_Mark||\". Create a dictionary for the following symbols where the symbol is the key and the value is the token:\n- Period ( . )\n- Comma ( , )\n- Quotation Mark ( \" )\n- Semicolon ( ; )\n- Exclamation mark ( ! )\n- Question mark ( ? )\n- Left Parentheses ( ( )\n- Right Parentheses ( ) )\n- Dash ( -- )\n- Return ( \\n )\n\nThis dictionary will be used to tokenize the symbols and add the delimiter (space) around them. This separates each symbol as its own word, making it easier for the neural network to predict the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token \"dash\", try using something like \"||dash||\".", "_____no_output_____" ] ], [ [ "def token_lookup():\n \"\"\"\n Generate a dict to turn punctuation into a token.\n :return: Tokenize dictionary where the key is the punctuation and the value is the token\n \"\"\"\n # TODO: Implement Function\n return {\n \".\": \"||period||\",\n \",\": \"||comma||\",\n \"\\\"\": \"||quotation_mark||\",\n \";\": \"||semicolon||\",\n \"!\": \"||exclamation_mark||\",\n \"?\": \"||question_mark||\",\n \"(\": \"||left_parentheses||\",\n \")\": \"||right_parentheses||\",\n \"--\": \"||dash||\",\n \"\\n\": \"||return||\",\n } \n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_tokenize(token_lookup)", "Tests Passed\n" ] ], [ [ "## Preprocess all the data and save it\nRunning the code cell below will preprocess all the data and save it to file.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n# Preprocess Training, Validation, and Testing Data\nhelper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)", "_____no_output_____" ] ], [ [ "# Check Point\nThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. 
The preprocessed data has been saved to disk.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport helper\nimport numpy as np\nimport problem_unittests as tests\n\nint_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()", "_____no_output_____" ] ], [ [ "## Build the Neural Network\nYou'll build the components necessary to build an RNN by implementing the following functions below:\n- get_inputs\n- get_init_cell\n- get_embed\n- build_rnn\n- build_nn\n- get_batches\n\n### Check the Version of TensorFlow and Access to GPU", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nfrom distutils.version import LooseVersion\nimport warnings\nimport tensorflow as tf\n\n# Check TensorFlow Version\nassert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'\nprint('TensorFlow Version: {}'.format(tf.__version__))\n\n# Check for a GPU\nif not tf.test.gpu_device_name():\n warnings.warn('No GPU found. Please use a GPU to train your neural network.')\nelse:\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))", "TensorFlow Version: 1.0.0\nDefault GPU Device: /gpu:0\n" ] ], [ [ "### Input\nImplement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:\n- Input text placeholder named \"input\" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter.\n- Targets placeholder\n- Learning Rate placeholder\n\nReturn the placeholders in the following tuple `(Input, Targets, LearningRate)`", "_____no_output_____" ] ], [ [ "def get_inputs():\n \"\"\"\n Create TF Placeholders for input, targets, and learning rate.\n :return: Tuple (input, targets, learning rate)\n \"\"\"\n # TODO: Implement Function\n input = tf.placeholder(tf.int32, [None, None], name='input')\n targets = tf.placeholder(tf.int32, [None, None], name='targets')\n learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n return input, targets, learning_rate\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_get_inputs(get_inputs)", "Tests Passed\n" ] ], [ [ "### Build RNN Cell and Initialize\nStack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell).\n- The RNN size should be set using `rnn_size`\n- Initialize Cell State using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell#zero_state) function\n - Apply the name \"initial_state\" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)\n\nReturn the cell and initial state in the following tuple `(Cell, InitialState)`", "_____no_output_____" ] ], [ [ "def get_init_cell(batch_size, rnn_size):\n \"\"\"\n Create an RNN Cell and initialize it.\n :param batch_size: Size of batches\n :param rnn_size: Size of RNNs\n :return: Tuple (cell, initial state)\n \"\"\"\n # TODO: Implement Function\n lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)\n\n cell = tf.contrib.rnn.MultiRNNCell([lstm])\n \n zero_state = cell.zero_state(batch_size, tf.float32)\n initial_state = tf.identity(zero_state, name='initial_state')\n return cell, initial_state\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_get_init_cell(get_init_cell)", 
"Tests Passed\n" ] ], [ [ "### Word Embedding\nApply embedding to `input_data` using TensorFlow. Return the embedded sequence.", "_____no_output_____" ] ], [ [ "def get_embed(input_data, vocab_size, embed_dim):\n \"\"\"\n Create embedding for <input_data>.\n :param input_data: TF placeholder for text input.\n :param vocab_size: Number of words in vocabulary.\n :param embed_dim: Number of embedding dimensions\n :return: Embedded input.\n \"\"\"\n # TODO: Implement Function\n embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, input_data)\n return embed\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_get_embed(get_embed)", "Tests Passed\n" ] ], [ [ "### Build RNN\nYou created an RNN Cell in the `get_init_cell()` function. Time to use the cell to create an RNN.\n- Build the RNN using the [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn)\n - Apply the name \"final_state\" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)\n\nReturn the outputs and final state in the following tuple `(Outputs, FinalState)` ", "_____no_output_____" ] ], [ [ "def build_rnn(cell, inputs):\n \"\"\"\n Create an RNN using an RNN Cell\n :param cell: RNN Cell\n :param inputs: Input text data\n :return: Tuple (Outputs, Final State)\n \"\"\"\n # TODO: Implement Function\n outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)\n final_state = tf.identity(state, name='final_state')\n return outputs, final_state\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_build_rnn(build_rnn)", "Tests Passed\n" ] ], [ [ "### Build the Neural Network\nApply the functions you implemented above to:\n- Apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function.\n- Build RNN using `cell` and your `build_rnn(cell, inputs)` function.\n- Apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs.\n\nReturn the logits and final state in the following tuple (Logits, FinalState) ", "_____no_output_____" ] ], [ [ "def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):\n \"\"\"\n Build part of the neural network\n :param cell: RNN cell\n :param rnn_size: Size of rnns\n :param input_data: Input data\n :param vocab_size: Vocabulary size\n :param embed_dim: Number of embedding dimensions\n :return: Tuple (Logits, FinalState)\n \"\"\"\n # TODO: Implement Function\n embedding = get_embed(input_data, vocab_size, embed_dim)  # use embed_dim (not rnn_size) as the embedding size\n outputs, final_state = build_rnn(cell, embedding)\n logits = tf.contrib.layers.fully_connected(outputs, vocab_size, activation_fn=None)\n return logits, final_state\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_build_nn(build_nn)", "Tests Passed\n" ] ], [ [ "### Batches\nImplement `get_batches` to create batches of input and targets using `int_text`. The batches should be a Numpy array with the shape `(number of batches, 2, batch size, sequence length)`. 
Each batch contains two elements:\n- The first element is a single batch of **input** with the shape `[batch size, sequence length]`\n- The second element is a single batch of **targets** with the shape `[batch size, sequence length]`\n\nIf you can't fill the last batch with enough data, drop the last batch.\n\nFor example, `get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2)` would return a Numpy array of the following:\n```\n[\n # First Batch\n [\n # Batch of Input\n [[ 1 2], [ 7 8], [13 14]]\n # Batch of targets\n [[ 2 3], [ 8 9], [14 15]]\n ]\n\n # Second Batch\n [\n # Batch of Input\n [[ 3 4], [ 9 10], [15 16]]\n # Batch of targets\n [[ 4 5], [10 11], [16 17]]\n ]\n\n # Third Batch\n [\n # Batch of Input\n [[ 5 6], [11 12], [17 18]]\n # Batch of targets\n [[ 6 7], [12 13], [18 1]]\n ]\n]\n```\n\nNotice that the last target value in the last batch is the first input value of the first batch. In this case, `1`. This is a common technique used when creating sequence batches, although it is rather unintuitive.", "_____no_output_____" ] ], [ [ "def get_batches(int_text, batch_size, seq_length):\n \"\"\"\n Return batches of input and target\n :param int_text: Text with the words replaced by their ids\n :param batch_size: The size of batch\n :param seq_length: The length of sequence\n :return: Batches as a Numpy array\n \"\"\"\n # TODO: Implement Function\n n_batches = int(len(int_text) / (batch_size * seq_length))\n\n # Drop the last few characters to make only full batches\n xdata = np.array(int_text[: n_batches * batch_size * seq_length])\n ydata = np.roll(xdata,-1)\n\n x_batches = np.split(xdata.reshape(batch_size, -1), n_batches, 1)\n y_batches = np.split(ydata.reshape(batch_size, -1), n_batches, 1)\n\n return np.array(list(zip(x_batches, y_batches)))\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_get_batches(get_batches)", "Tests Passed\n" ] ], [ [ "## Neural Network Training\n### Hyperparameters\nTune the following parameters:\n\n- Set `num_epochs` to the number of epochs.\n- Set `batch_size` to the batch size.\n- Set `rnn_size` to the size of the RNNs.\n- Set `embed_dim` to the size of the embedding.\n- Set `seq_length` to the length of sequence.\n- Set `learning_rate` to the learning rate.\n- Set `show_every_n_batches` to the number of batches after which the neural network should print progress.", "_____no_output_____" ] ], [ [ "# Number of Epochs\nnum_epochs = 2000\n# Batch Size\nbatch_size = 256\n# RNN Size\nrnn_size = 128\n# Embedding Dimension Size\nembed_dim = 256\n# Sequence Length\nseq_length = 32\n# Learning Rate\nlearning_rate = 0.001\n# Show stats for every n number of batches\nshow_every_n_batches = 100\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\nsave_dir = './save'", "_____no_output_____" ] ], [ [ "### Build the Graph\nBuild the graph using the neural network you implemented.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nfrom tensorflow.contrib import seq2seq\n\ntrain_graph = tf.Graph()\nwith train_graph.as_default():\n vocab_size = len(int_to_vocab)\n input_text, targets, lr = get_inputs()\n input_data_shape = tf.shape(input_text)\n cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)\n logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)\n\n # Probabilities for generating words\n probs = tf.nn.softmax(logits, name='probs')\n\n # Loss function\n cost = seq2seq.sequence_loss(\n 
logits,\n targets,\n tf.ones([input_data_shape[0], input_data_shape[1]]))\n\n # Optimizer\n optimizer = tf.train.AdamOptimizer(lr)\n\n # Gradient Clipping\n gradients = optimizer.compute_gradients(cost)\n capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]\n train_op = optimizer.apply_gradients(capped_gradients)", "_____no_output_____" ] ], [ [ "## Train\nTrain the neural network on the preprocessed data. If you have a hard time getting a good loss, check the [forums](https://discussions.udacity.com/) to see if anyone is having the same problem.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nbatches = get_batches(int_text, batch_size, seq_length)\n\nwith tf.Session(graph=train_graph) as sess:\n sess.run(tf.global_variables_initializer())\n\n for epoch_i in range(num_epochs):\n state = sess.run(initial_state, {input_text: batches[0][0]})\n\n for batch_i, (x, y) in enumerate(batches):\n feed = {\n input_text: x,\n targets: y,\n initial_state: state,\n lr: learning_rate}\n train_loss, state, _ = sess.run([cost, final_state, train_op], feed)\n\n # Show every <show_every_n_batches> batches\n if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:\n print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(\n epoch_i,\n batch_i,\n len(batches),\n train_loss))\n\n # Save Model\n saver = tf.train.Saver()\n saver.save(sess, save_dir)\n print('Model Trained and Saved')", "Epoch 0 Batch 0/8 train_loss = 8.822\nEpoch 12 Batch 4/8 train_loss = 5.923\nEpoch 25 Batch 0/8 train_loss = 5.485\nEpoch 37 Batch 4/8 train_loss = 5.088\nEpoch 50 Batch 0/8 train_loss = 4.792\nEpoch 62 Batch 4/8 train_loss = 4.550\nEpoch 75 Batch 0/8 train_loss = 4.378\nEpoch 87 Batch 4/8 train_loss = 4.206\nEpoch 100 Batch 0/8 train_loss = 4.071\nEpoch 112 Batch 4/8 train_loss = 3.930\nEpoch 125 Batch 0/8 train_loss = 3.811\nEpoch 137 Batch 4/8 train_loss = 3.692\nEpoch 150 Batch 0/8 train_loss = 3.582\nEpoch 162 Batch 4/8 train_loss = 3.478\nEpoch 175 Batch 0/8 train_loss = 3.380\nEpoch 187 Batch 4/8 train_loss = 3.286\nEpoch 200 Batch 0/8 train_loss = 3.202\nEpoch 212 Batch 4/8 train_loss = 3.116\nEpoch 225 Batch 0/8 train_loss = 3.040\nEpoch 237 Batch 4/8 train_loss = 2.960\nEpoch 250 Batch 0/8 train_loss = 2.888\nEpoch 262 Batch 4/8 train_loss = 2.812\nEpoch 275 Batch 0/8 train_loss = 2.745\nEpoch 287 Batch 4/8 train_loss = 2.669\nEpoch 300 Batch 0/8 train_loss = 2.600\nEpoch 312 Batch 4/8 train_loss = 2.531\nEpoch 325 Batch 0/8 train_loss = 2.464\nEpoch 337 Batch 4/8 train_loss = 2.400\nEpoch 350 Batch 0/8 train_loss = 2.336\nEpoch 362 Batch 4/8 train_loss = 2.276\nEpoch 375 Batch 0/8 train_loss = 2.207\nEpoch 387 Batch 4/8 train_loss = 2.156\nEpoch 400 Batch 0/8 train_loss = 2.094\nEpoch 412 Batch 4/8 train_loss = 2.041\nEpoch 425 Batch 0/8 train_loss = 1.984\nEpoch 437 Batch 4/8 train_loss = 1.935\nEpoch 450 Batch 0/8 train_loss = 1.878\nEpoch 462 Batch 4/8 train_loss = 1.832\nEpoch 475 Batch 0/8 train_loss = 1.783\nEpoch 487 Batch 4/8 train_loss = 1.741\nEpoch 500 Batch 0/8 train_loss = 1.691\nEpoch 512 Batch 4/8 train_loss = 1.655\nEpoch 525 Batch 0/8 train_loss = 1.613\nEpoch 537 Batch 4/8 train_loss = 1.570\nEpoch 550 Batch 0/8 train_loss = 1.537\nEpoch 562 Batch 4/8 train_loss = 1.496\nEpoch 575 Batch 0/8 train_loss = 1.456\nEpoch 587 Batch 4/8 train_loss = 1.423\nEpoch 600 Batch 0/8 train_loss = 1.391\nEpoch 612 Batch 4/8 train_loss = 1.361\nEpoch 625 Batch 0/8 train_loss = 1.330\nEpoch 637 Batch 4/8 
train_loss = 1.293\nEpoch 650 Batch 0/8 train_loss = 1.295\nEpoch 662 Batch 4/8 train_loss = 1.251\nEpoch 675 Batch 0/8 train_loss = 1.227\nEpoch 687 Batch 4/8 train_loss = 1.190\nEpoch 700 Batch 0/8 train_loss = 1.171\nEpoch 712 Batch 4/8 train_loss = 1.134\nEpoch 725 Batch 0/8 train_loss = 1.120\nEpoch 737 Batch 4/8 train_loss = 1.089\nEpoch 750 Batch 0/8 train_loss = 1.074\nEpoch 762 Batch 4/8 train_loss = 1.043\nEpoch 775 Batch 0/8 train_loss = 1.028\nEpoch 787 Batch 4/8 train_loss = 1.000\nEpoch 800 Batch 0/8 train_loss = 0.986\nEpoch 812 Batch 4/8 train_loss = 0.961\nEpoch 825 Batch 0/8 train_loss = 0.946\nEpoch 837 Batch 4/8 train_loss = 0.921\nEpoch 850 Batch 0/8 train_loss = 0.903\nEpoch 862 Batch 4/8 train_loss = 0.883\nEpoch 875 Batch 0/8 train_loss = 0.868\nEpoch 887 Batch 4/8 train_loss = 0.854\nEpoch 900 Batch 0/8 train_loss = 0.841\nEpoch 912 Batch 4/8 train_loss = 0.824\nEpoch 925 Batch 0/8 train_loss = 0.810\nEpoch 937 Batch 4/8 train_loss = 0.796\nEpoch 950 Batch 0/8 train_loss = 0.778\nEpoch 962 Batch 4/8 train_loss = 0.756\nEpoch 975 Batch 0/8 train_loss = 0.743\nEpoch 987 Batch 4/8 train_loss = 0.742\nEpoch 1000 Batch 0/8 train_loss = 0.724\nEpoch 1012 Batch 4/8 train_loss = 0.711\nEpoch 1025 Batch 0/8 train_loss = 0.699\nEpoch 1037 Batch 4/8 train_loss = 0.684\nEpoch 1050 Batch 0/8 train_loss = 0.667\nEpoch 1062 Batch 4/8 train_loss = 0.655\nEpoch 1075 Batch 0/8 train_loss = 0.650\nEpoch 1087 Batch 4/8 train_loss = 0.638\nEpoch 1100 Batch 0/8 train_loss = 0.633\nEpoch 1112 Batch 4/8 train_loss = 0.611\nEpoch 1125 Batch 0/8 train_loss = 0.602\nEpoch 1137 Batch 4/8 train_loss = 0.593\nEpoch 1150 Batch 0/8 train_loss = 0.584\nEpoch 1162 Batch 4/8 train_loss = 0.576\nEpoch 1175 Batch 0/8 train_loss = 0.561\nEpoch 1187 Batch 4/8 train_loss = 0.555\nEpoch 1200 Batch 0/8 train_loss = 0.551\nEpoch 1212 Batch 4/8 train_loss = 0.531\nEpoch 1225 Batch 0/8 train_loss = 0.526\nEpoch 1237 Batch 4/8 train_loss = 0.520\nEpoch 1250 Batch 0/8 train_loss = 0.511\nEpoch 1262 Batch 4/8 train_loss = 0.495\nEpoch 1275 Batch 0/8 train_loss = 0.499\nEpoch 1287 Batch 4/8 train_loss = 0.481\nEpoch 1300 Batch 0/8 train_loss = 0.475\nEpoch 1312 Batch 4/8 train_loss = 0.468\nEpoch 1325 Batch 0/8 train_loss = 0.462\nEpoch 1337 Batch 4/8 train_loss = 0.449\nEpoch 1350 Batch 0/8 train_loss = 0.445\nEpoch 1362 Batch 4/8 train_loss = 0.446\nEpoch 1375 Batch 0/8 train_loss = 0.430\nEpoch 1387 Batch 4/8 train_loss = 0.426\nEpoch 1400 Batch 0/8 train_loss = 0.418\nEpoch 1412 Batch 4/8 train_loss = 0.411\nEpoch 1425 Batch 0/8 train_loss = 0.404\nEpoch 1437 Batch 4/8 train_loss = 0.393\nEpoch 1450 Batch 0/8 train_loss = 0.406\nEpoch 1462 Batch 4/8 train_loss = 0.379\nEpoch 1475 Batch 0/8 train_loss = 0.383\nEpoch 1487 Batch 4/8 train_loss = 0.368\nEpoch 1500 Batch 0/8 train_loss = 0.369\nEpoch 1512 Batch 4/8 train_loss = 0.366\nEpoch 1525 Batch 0/8 train_loss = 0.357\nEpoch 1537 Batch 4/8 train_loss = 0.351\nEpoch 1550 Batch 0/8 train_loss = 0.350\nEpoch 1562 Batch 4/8 train_loss = 0.339\nEpoch 1575 Batch 0/8 train_loss = 0.343\nEpoch 1587 Batch 4/8 train_loss = 0.330\nEpoch 1600 Batch 0/8 train_loss = 0.328\nEpoch 1612 Batch 4/8 train_loss = 0.317\nEpoch 1625 Batch 0/8 train_loss = 0.320\nEpoch 1637 Batch 4/8 train_loss = 0.309\nEpoch 1650 Batch 0/8 train_loss = 0.315\nEpoch 1662 Batch 4/8 train_loss = 0.298\nEpoch 1675 Batch 0/8 train_loss = 0.326\nEpoch 1687 Batch 4/8 train_loss = 0.288\nEpoch 1700 Batch 0/8 train_loss = 0.289\nEpoch 1712 Batch 4/8 train_loss = 0.288\nEpoch 1725 Batch 0/8 train_loss = 
0.282\nEpoch 1737 Batch 4/8 train_loss = 0.283\nEpoch 1750 Batch 0/8 train_loss = 0.272\nEpoch 1762 Batch 4/8 train_loss = 0.265\nEpoch 1775 Batch 0/8 train_loss = 0.274\nEpoch 1787 Batch 4/8 train_loss = 0.257\nEpoch 1800 Batch 0/8 train_loss = 0.267\nEpoch 1812 Batch 4/8 train_loss = 0.254\nEpoch 1825 Batch 0/8 train_loss = 0.247\nEpoch 1837 Batch 4/8 train_loss = 0.248\nEpoch 1850 Batch 0/8 train_loss = 0.279\nEpoch 1862 Batch 4/8 train_loss = 0.237\nEpoch 1875 Batch 0/8 train_loss = 0.235\nEpoch 1887 Batch 4/8 train_loss = 0.234\nEpoch 1900 Batch 0/8 train_loss = 0.236\nEpoch 1912 Batch 4/8 train_loss = 0.223\nEpoch 1925 Batch 0/8 train_loss = 0.223\nEpoch 1937 Batch 4/8 train_loss = 0.239\nEpoch 1950 Batch 0/8 train_loss = 0.216\nEpoch 1962 Batch 4/8 train_loss = 0.217\nEpoch 1975 Batch 0/8 train_loss = 0.236\nEpoch 1987 Batch 4/8 train_loss = 0.207\nModel Trained and Saved\n" ] ], [ [ "## Save Parameters\nSave `seq_length` and `save_dir` for generating a new TV script.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n# Save parameters for checkpoint\nhelper.save_params((seq_length, save_dir))", "_____no_output_____" ] ], [ [ "# Checkpoint", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nimport helper\nimport problem_unittests as tests\n\n_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()\nseq_length, load_dir = helper.load_params()", "_____no_output_____" ] ], [ [ "## Implement Generate Functions\n### Get Tensors\nGet tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graph#get_tensor_by_name). Get the tensors using the following names:\n- \"input:0\"\n- \"initial_state:0\"\n- \"final_state:0\"\n- \"probs:0\"\n\nReturn the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)` ", "_____no_output_____" ] ], [ [ "def get_tensors(loaded_graph):\n \"\"\"\n Get input, initial state, final state, and probabilities tensor from <loaded_graph>\n :param loaded_graph: TensorFlow graph loaded from file\n :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)\n \"\"\"\n # TODO: Implement Function\n input = loaded_graph.get_tensor_by_name(\"input:0\")\n initial_state = loaded_graph.get_tensor_by_name(\"initial_state:0\")\n final_state = loaded_graph.get_tensor_by_name(\"final_state:0\")\n probs = loaded_graph.get_tensor_by_name(\"probs:0\")\n\n return input, initial_state, final_state, probs\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_get_tensors(get_tensors)", "Tests Passed\n" ] ], [ [ "### Choose Word\nImplement the `pick_word()` function to select the next word using `probabilities`.", "_____no_output_____" ] ], [ [ "def pick_word(probabilities, int_to_vocab):\n \"\"\"\n Pick the next word in the generated text\n :param probabilities: Probabilites of the next word\n :param int_to_vocab: Dictionary of word ids as the keys and words as the values\n :return: String of the predicted word\n \"\"\"\n # TODO: Implement Function\n index = np.argmax(probabilities)\n return int_to_vocab[index]\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_pick_word(pick_word)", "Tests Passed\n" ] ], [ [ "## Generate TV Script\nThis will generate the TV script for you. 
Set `gen_length` to the length of TV script you want to generate.", "_____no_output_____" ] ], [ [ "gen_length = 200\n# homer_simpson, moe_szyslak, or Barney_Gumble\nprime_word = 'moe_szyslak'\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\nloaded_graph = tf.Graph()\nwith tf.Session(graph=loaded_graph) as sess:\n # Load saved model\n loader = tf.train.import_meta_graph(load_dir + '.meta')\n loader.restore(sess, load_dir)\n\n # Get Tensors from loaded model\n input_text, initial_state, final_state, probs = get_tensors(loaded_graph)\n\n # Sentences generation setup\n gen_sentences = [prime_word + ':']\n prev_state = sess.run(initial_state, {input_text: np.array([[1]])})\n\n # Generate sentences\n for n in range(gen_length):\n # Dynamic Input\n dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]\n dyn_seq_length = len(dyn_input[0])\n\n # Get Prediction\n probabilities, prev_state = sess.run(\n [probs, final_state],\n {input_text: dyn_input, initial_state: prev_state})\n \n pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)\n\n gen_sentences.append(pred_word)\n \n # Remove tokens\n tv_script = ' '.join(gen_sentences)\n for key, token in token_dict.items():\n ending = ' ' if key in ['\\n', '(', '\"'] else ''\n tv_script = tv_script.replace(' ' + token.lower(), key)\n tv_script = tv_script.replace('\\n ', '\\n')\n tv_script = tv_script.replace('( ', '(')\n \n print(tv_script)", "moe_szyslak:(eyeing homer's ass) oh yeah, that would look so good on me.\n\n\nmoe_szyslak:(hostile) hey homer, i told you not to come on? bart and that's why i came to your time, for all you know.\nbarney_gumble:(toasting) for all we take a nap so i'm awake for the meteor shower.\nwaylon_smithers: well, i'm here protecting our investment.\nmoe_szyslak: and i was in bartending school, i thought i had the world by the jigger.\nhomer_simpson: moe, your daughter loves you--(choking noises)\nhomer_simpson: sorry moe, your bleak outlook has sent a smooth, icy chill down...(dropping character) everybody, lisa won't take your eyeball and make soup out of it all the little chick?\nmoe_szyslak: or what do i put this delicately? i don't got enough booze in this place to make you a beer.\nhomer_simpson:(gasps) what?\nmoe_szyslak: legally, i can't say.\nbarney_gumble: from moe! the tree is real!\n" ] ], [ [ "# The TV Script is Nonsensical\nIt's ok if the TV script doesn't make any sense. We trained on less than a megabyte of text. In order to get good results, you'll have to use a smaller vocabulary or get more data. Luckily there's more data! As we mentioned at the beginning of this project, this is a subset of [another dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data). We didn't have you train on all the data, because that would take too long. However, you are free to train your neural network on all the data. After you complete the project, of course.\n# Submitting This Project\nWhen submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as \"dlnd_tv_script_generation.ipynb\" and save it as an HTML file under \"File\" -> \"Download as\". Include the \"helper.py\" and \"problem_unittests.py\" files in your submission.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
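Annotation: `pick_word` in the record above always takes `np.argmax`, which makes generation deterministic and prone to repeating itself. A common variation (an assumption here, not part of the record) is to sample from the softmax distribution, optionally sharpened with a temperature:

```python
import numpy as np

def pick_word_sampled(probabilities, int_to_vocab, temperature=1.0):
    """Sample the next word instead of taking argmax (hypothetical variant).

    temperature < 1 sharpens the distribution; temperature > 1 flattens it.
    """
    p = np.asarray(probabilities, dtype=np.float64)
    p = np.log(p + 1e-10) / temperature   # rescale in log space
    p = np.exp(p)
    p /= p.sum()                          # renormalize to a valid distribution
    index = np.random.choice(len(p), p=p)
    return int_to_vocab[index]
```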
4a7d3abd5dabcae7be3792c2114f7bc379d7f814
32,987
ipynb
Jupyter Notebook
Untitled.ipynb
beto3110/pruebas
083d2d5de615aa5f8c96570260293e7b8210a3cc
[ "MIT" ]
null
null
null
Untitled.ipynb
beto3110/pruebas
083d2d5de615aa5f8c96570260293e7b8210a3cc
[ "MIT" ]
null
null
null
Untitled.ipynb
beto3110/pruebas
083d2d5de615aa5f8c96570260293e7b8210a3cc
[ "MIT" ]
null
null
null
159.357488
11,252
0.910995
[ [ [ "from pylab import *", "_____no_output_____" ], [ "x=linspace(1,10)", "_____no_output_____" ], [ "y=1/(x**2)", "_____no_output_____" ], [ "plot(x,y)", "_____no_output_____" ], [ "z=linspace(0,100,1001)\nprint(z)", "[ 0. 0.1 0.2 ... 99.8 99.9 100. ]\n" ], [ "z[6]", "_____no_output_____" ], [ "plot(x,x**2)", "_____no_output_____" ], [ "def mi_sin_p(x, n=10):\n f = 0\n for k in range(n): # separate loop variable so the parameter n is not shadowed\n f += ((-1)**k) * ((x**(2*k+1))/(2*k+1))\n return f", "_____no_output_____" ], [ "plot(x,mi_sin_p(x))", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
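Annotation: despite its name, `mi_sin_p` in the record above sums (-1)^k x^(2k+1)/(2k+1), which is the Leibniz series for arctan(x); that series only converges for |x| <= 1, which is why the final plot over x in [1, 10] blows up. The sine series divides by (2k+1)! instead; a corrected sketch:

```python
from math import factorial
import numpy as np

def mi_sin(x, n=10):
    # Taylor partial sum of sin(x): sum_{k=0}^{n-1} (-1)^k x^(2k+1) / (2k+1)!
    f = 0.0
    for k in range(n):
        f += ((-1) ** k) * (x ** (2 * k + 1)) / factorial(2 * k + 1)
    return f

x = np.linspace(-np.pi, np.pi, 100)
print(np.max(np.abs(mi_sin(x) - np.sin(x))))  # on the order of 1e-9: the partial sum tracks sin here
```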
4a7d3f7888e8a3c24d9667c0ba22dff88f5e1391
2,686
ipynb
Jupyter Notebook
001-house-password.ipynb
singlejoyce/jupyter_notebook
87e13e9e33cf2f0956bfcb7bcc2afd881188a579
[ "Apache-2.0" ]
null
null
null
001-house-password.ipynb
singlejoyce/jupyter_notebook
87e13e9e33cf2f0956bfcb7bcc2afd881188a579
[ "Apache-2.0" ]
null
null
null
001-house-password.ipynb
singlejoyce/jupyter_notebook
87e13e9e33cf2f0956bfcb7bcc2afd881188a579
[ "Apache-2.0" ]
null
null
null
26.594059
96
0.537975
[ [ [ "Stephen and Sophia use simple passwords for everything and forget about security.\nPlease help Nikola develop a password security check module.\nA password is considered strong enough if its length is greater than or equal to 10 characters and it contains at least one digit, one uppercase letter, and one lowercase letter.\nThe password contains only ASCII Latin letters or digits.\n\nInput: a password.\n\nOutput: whether the password is safe or not, as a boolean (bool), or any data type that can be converted to and handled as a boolean. You will see the converted result (True or False).\n\nExamples:\n\ncheckio('A1213pokl') == False\ncheckio('bAse730onE') == True\ncheckio('asasasasasasasaas') == False\ncheckio('QWERTYqwerty') == False\ncheckio('123456123456') == False\ncheckio('QwErTy911poqqqq') == True\n\n\nHow it is used: if you are concerned about the security of your app or service, you can check the complexity of your users' passwords. You can use these techniques to require that passwords satisfy multiple conditions (punctuation or unicode).\n\nPreconditions: \nre.match(\"[a-zA-Z0-9]+\", password) \n0 < len(password) ≤ 64", "_____no_output_____" ], [ "import re\n\n\ndef checkio(password) -> bool:\n rematch = re.match(\"^(?=.*[a-z])(?=.*[A-Z])(?=.*\\d)[a-zA-Z\\d]{10,}$\", password)\n if rematch:\n return True\n else:\n return False\n\n\nif __name__ == '__main__':\n # These \"asserts\" are used only for self-checking and are not necessary for auto-testing\n assert checkio('A1213pokl') == False, \"1st example\"\n assert checkio('bAse730onE4') == True, \"2nd example\"\n assert checkio('asasasasasasasaas') == False, \"3rd example\"\n assert checkio('QWERTYqwerty') == False, \"4th example\"\n assert checkio('123456123456') == False, \"5th example\"\n assert checkio('QwErTy911poqqqq') == True, \"6th example\"\n print(\"Coding complete? Click 'Check' to review your tests and earn cool rewards!\")", "Coding complete? Click 'Check' to review your tests and earn cool rewards!\n" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
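Annotation: the triple-lookahead regex in the record above is compact but opaque. An equivalent plain-Python check, as a sketch implementing the same rules (length >= 10, ASCII letters/digits only, at least one digit, one lowercase, one uppercase; `str.isascii` needs Python 3.7+):

```python
def checkio_plain(password: str) -> bool:
    # Mirrors ^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)[a-zA-Z\d]{10,}$ without a regex.
    return (len(password) >= 10
            and password.isascii() and password.isalnum()
            and any(c.islower() for c in password)
            and any(c.isupper() for c in password)
            and any(c.isdigit() for c in password))

assert checkio_plain('QwErTy911poqqqq') is True
assert checkio_plain('QWERTYqwerty') is False
```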
4a7d50bda2e47e1b5f5525acd8aecc459b32d301
19,429
ipynb
Jupyter Notebook
lab12.ipynb
epicha/ia-241
f71a76a93fa1647be2efe6d4aaf6c172b3722dd1
[ "MIT" ]
null
null
null
lab12.ipynb
epicha/ia-241
f71a76a93fa1647be2efe6d4aaf6c172b3722dd1
[ "MIT" ]
null
null
null
lab12.ipynb
epicha/ia-241
f71a76a93fa1647be2efe6d4aaf6c172b3722dd1
[ "MIT" ]
null
null
null
27.480905
96
0.365433
[ [ [ "## Lab 12 Data Analysis in Python", "_____no_output_____" ] ], [ [ "import pandas", "_____no_output_____" ], [ "df = pandas.read_excel('s3://picha-ia241-2021spring/house_price.xls')\n\ndf[:10]", "_____no_output_____" ] ], [ [ "## 2.1", "_____no_output_____" ] ], [ [ "df['unit_price']=df['price']/df['area']\ndf[:10]", "_____no_output_____" ] ], [ [ "## 2.2", "_____no_output_____" ] ], [ [ "df['house_type'].value_counts()", "_____no_output_____" ] ], [ [ "## 2.3", "_____no_output_____" ] ], [ [ "prc_more_2_bath=df.loc[ df['bathroom']>2 ]['price']\n\nprint('avg price of houses with more than 2 bathrooms is ${}'.format(prc_more_2_bath.mean()))", "avg price of houses with more than 2 bathrooms is $383645.45454545453\n" ] ], [ [ "## 2.4", "_____no_output_____" ] ], [ [ "print('mean unit price is ${}'.format(df['unit_price'].mean()))", "mean unit price is $167.45934522134766\n" ], [ "print('median unit price is ${}'.format(df['unit_price'].median()))", "median unit price is $130.13392857142858\n" ] ], [ [ "## 2.5", "_____no_output_____" ] ], [ [ "df.groupby('house_type').mean()['price']", "_____no_output_____" ] ], [ [ "## 2.6", "_____no_output_____" ] ], [ [ "from scipy import stats", "_____no_output_____" ], [ "result=stats.linregress(df['area'],df['price'])", "_____no_output_____" ], [ "print('slope is {}'.format(result.slope))\nprint('intercept is {}'.format(result.intercept))\nprint('r value is {}'.format(result.rvalue))\nprint('p value is {}'.format(result.pvalue))", "slope is 79.95495729411489\nintercept is 156254.76245096227\nr value is 0.4841384225498625\np value is 0.001340065037461188\n" ] ], [ [ "## 2.7", "_____no_output_____" ] ], [ [ "print('price of a house with {} sqft is ${}'.format(2000,2000*result.slope+result.intercept))", "price of a house with 2000 sqft is $316164.67703919206\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
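Annotation: `linregress` returns the correlation coefficient r in `rvalue`; the coefficient of determination is its square. A small sketch building on the `result` object from the record above (assumed to still be in scope):

```python
def predict_price(area_sqft, fit):
    # Fitted line: price = slope * area + intercept
    return fit.slope * area_sqft + fit.intercept

r_squared = result.rvalue ** 2  # ~0.234 given r ~ 0.484
print('r squared is {:.3f}'.format(r_squared))
print('predicted price for 1500 sqft is ${:,.0f}'.format(predict_price(1500, result)))
```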
4a7d65b8914be56164cd52c486b111a9d8fe6551
17,252
ipynb
Jupyter Notebook
intro-to-pytorch/Part 1 - Tensors in PyTorch (Exercises).ipynb
bao1981105/deep-learning-v2-pytorch
c3fccc732ac28944dee6195abc8ecee083594030
[ "MIT" ]
null
null
null
intro-to-pytorch/Part 1 - Tensors in PyTorch (Exercises).ipynb
bao1981105/deep-learning-v2-pytorch
c3fccc732ac28944dee6195abc8ecee083594030
[ "MIT" ]
null
null
null
intro-to-pytorch/Part 1 - Tensors in PyTorch (Exercises).ipynb
bao1981105/deep-learning-v2-pytorch
c3fccc732ac28944dee6195abc8ecee083594030
[ "MIT" ]
null
null
null
38.508929
674
0.598597
[ [ [ "# Introduction to Deep Learning with PyTorch\n\nIn this notebook, you'll get introduced to [PyTorch](http://pytorch.org/), a framework for building and training neural networks. PyTorch in a lot of ways behaves like the arrays you love from Numpy. These Numpy arrays, after all, are just tensors. PyTorch takes these tensors and makes it simple to move them to GPUs for the faster processing needed when training neural networks. It also provides a module that automatically calculates gradients (for backpropagation!) and another module specifically for building neural networks. All together, PyTorch ends up being more coherent with Python and the Numpy/Scipy stack compared to TensorFlow and other frameworks.\n\n", "_____no_output_____" ], [ "## Neural Networks\n\nDeep Learning is based on artificial neural networks which have been around in some form since the late 1950s. The networks are built from individual parts approximating neurons, typically called units or simply \"neurons.\" Each unit has some number of weighted inputs. These weighted inputs are summed together (a linear combination) then passed through an activation function to get the unit's output.\n\n<img src=\"assets/simple_neuron.png\" width=400px>\n\nMathematically this looks like: \n\n$$\n\\begin{align}\ny &= f(w_1 x_1 + w_2 x_2 + b) \\\\\ny &= f\\left(\\sum_i w_i x_i +b \\right)\n\\end{align}\n$$\n\nWith vectors this is the dot/inner product of two vectors:\n\n$$\nh = \\begin{bmatrix}\nx_1 \\, x_2 \\cdots x_n\n\\end{bmatrix}\n\\cdot \n\\begin{bmatrix}\n w_1 \\\\\n w_2 \\\\\n \\vdots \\\\\n w_n\n\\end{bmatrix}\n$$", "_____no_output_____" ], [ "## Tensors\n\nIt turns out neural network computations are just a bunch of linear algebra operations on *tensors*, a generalization of matrices. A vector is a 1-dimensional tensor, a matrix is a 2-dimensional tensor, an array with three indices is a 3-dimensional tensor (RGB color images for example). The fundamental data structure for neural networks are tensors and PyTorch (as well as pretty much every other deep learning framework) is built around tensors.\n\n<img src=\"assets/tensor_examples.svg\" width=600px>\n\nWith the basics covered, it's time to explore how we can use PyTorch to build a simple neural network.", "_____no_output_____" ] ], [ [ "# First, import PyTorch\nimport torch", "_____no_output_____" ], [ "def activation(x):\n \"\"\" Sigmoid activation function \n \n Arguments\n ---------\n x: torch.Tensor\n \"\"\"\n return 1/(1+torch.exp(-x))", "_____no_output_____" ], [ "### Generate some data\ntorch.manual_seed(7) # Set the random seed so things are predictable\n\n# Features are 5 random normal variables\nfeatures = torch.randn((1, 5))\n# True weights for our data, random normal variables again\nweights = torch.randn_like(features)\n# and a true bias term\nbias = torch.randn((1, 1))", "_____no_output_____" ] ], [ [ "Above I generated data we can use to get the output of our simple network. This is all just random for now, going forward we'll start using normal data. Going through each relevant line:\n\n`features = torch.randn((1, 5))` creates a tensor with shape `(1, 5)`, one row and five columns, that contains values randomly distributed according to the normal distribution with a mean of zero and standard deviation of one. 
\n\n`weights = torch.randn_like(features)` creates another tensor with the same shape as `features`, again containing values from a normal distribution.\n\nFinally, `bias = torch.randn((1, 1))` creates a single value from a normal distribution.\n\nPyTorch tensors can be added, multiplied, subtracted, etc., just like Numpy arrays. In general, you'll use PyTorch tensors pretty much the same way you'd use Numpy arrays. They come with some nice benefits though, such as GPU acceleration, which we'll get to later. For now, use the generated data to calculate the output of this simple single layer network. \n> **Exercise**: Calculate the output of the network with input features `features`, weights `weights`, and bias `bias`. Similar to Numpy, PyTorch has a [`torch.sum()`](https://pytorch.org/docs/stable/torch.html#torch.sum) function, as well as a `.sum()` method on tensors, for taking sums. Use the function `activation` defined above as the activation function.", "_____no_output_____" ] ], [ [ "## Calculate the output of this network using the weights and bias tensors\ny = activation(torch.sum(features * weights) + bias)\n# or y = activation((features * weights).sum() + bias), since tensors also have a .sum() method\nprint(y)", "tensor([[0.1595]])\n" ] ], [ [ "You can do the multiplication and sum in the same operation using a matrix multiplication. In general, you'll want to use matrix multiplications since they are more efficient and accelerated using modern libraries and high-performance computing on GPUs.\n\nHere, we want to do a matrix multiplication of the features and the weights. For this we can use [`torch.mm()`](https://pytorch.org/docs/stable/torch.html#torch.mm) or [`torch.matmul()`](https://pytorch.org/docs/stable/torch.html#torch.matmul) which is somewhat more complicated and supports broadcasting. If we try to do it with `features` and `weights` as they are, we'll get an error\n\n```python\n>> torch.mm(features, weights)\n\n---------------------------------------------------------------------------\nRuntimeError Traceback (most recent call last)\n<ipython-input-13-15d592eb5279> in <module>()\n----> 1 torch.mm(features, weights)\n\nRuntimeError: size mismatch, m1: [1 x 5], m2: [1 x 5] at /Users/soumith/minicondabuild3/conda-bld/pytorch_1524590658547/work/aten/src/TH/generic/THTensorMath.c:2033\n```\n\nAs you're building neural networks in any framework, you'll see this often. Really often. What's happening here is our tensors aren't the correct shapes to perform a matrix multiplication. Remember that for matrix multiplications, the number of columns in the first tensor must equal the number of rows in the second tensor. Both `features` and `weights` have the same shape, `(1, 5)`. This means we need to change the shape of `weights` to get the matrix multiplication to work.\n\n**Note:** To see the shape of a tensor called `tensor`, use `tensor.shape`. If you're building neural networks, you'll be using this method often.\n\nThere are a few options here: [`weights.reshape()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.reshape), [`weights.resize_()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.resize_), and [`weights.view()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.view).\n\n* `weights.reshape(a, b)` will return a tensor of size `(a, b)` that sometimes shares the same data as `weights` and sometimes is a clone, as in it copies the data to another part of memory.\n* `weights.resize_(a, b)` returns the same tensor with a different shape. 
However, if the new shape results in fewer elements than the original tensor, some elements will be removed from the tensor (but not from memory). If the new shape results in more elements than the original tensor, new elements will be uninitialized in memory. Here I should note that the underscore at the end of the method denotes that this method is performed **in-place**. Here is a great forum thread to [read more about in-place operations](https://discuss.pytorch.org/t/what-is-in-place-operation/16244) in PyTorch.\n* `weights.view(a, b)` will return a new tensor with the same data as `weights` with size `(a, b)`.\n\nI usually use `.view()`, but any of the three methods will work for this. So, now we can reshape `weights` to have five rows and one column with something like `weights.view(5, 1)`.\n\n> **Exercise**: Calculate the output of our little network using matrix multiplication.", "_____no_output_____" ] ], [ [ "## Calculate the output of this network using matrix multiplication\ny = activation(torch.mm(features, weights.view(5,1)) + bias)\nprint(y)", "tensor([[0.1595]])\n" ] ], [ [ "### Stack them up!\n\nThat's how you can calculate the output for a single neuron. The real power of this algorithm happens when you start stacking these individual units into layers and stacks of layers, into a network of neurons. The output of one layer of neurons becomes the input for the next layer. With multiple input units and output units, we now need to express the weights as a matrix.\n\n<img src='assets/multilayer_diagram_weights.png' width=450px>\n\nThe first layer shown on the bottom here contains the inputs, understandably called the **input layer**. The middle layer is called the **hidden layer**, and the final layer (on the right) is the **output layer**. We can express this network mathematically with matrices again and use matrix multiplication to get linear combinations for each unit in one operation. For example, the hidden layer ($h_1$ and $h_2$ here) can be calculated \n\n$$\n\\vec{h} = [h_1 \\, h_2] = \n\\begin{bmatrix}\nx_1 \\, x_2 \\cdots \\, x_n\n\\end{bmatrix}\n\\cdot \n\\begin{bmatrix}\n w_{11} & w_{12} \\\\\n w_{21} & w_{22} \\\\\n \\vdots & \\vdots \\\\\n w_{n1} & w_{n2}\n\\end{bmatrix}\n$$\n\nThe output for this small network is found by treating the hidden layer as inputs for the output unit. The network output is expressed simply\n\n$$\ny = f_2 \\! \\left(\\, f_1 \\! \\left(\\vec{x} \\, \\mathbf{W_1}\\right) \\mathbf{W_2} \\right)\n$$", "_____no_output_____" ] ], [ [ "### Generate some data\ntorch.manual_seed(7) # Set the random seed so things are predictable\n\n# Features are 3 random normal variables\nfeatures = torch.randn((1, 3))\n\n# Define the size of each layer in our network\nn_input = features.shape[1] # Number of input units, must match number of input features\nn_hidden = 2 # Number of hidden units \nn_output = 1 # Number of output units\n\n# Weights for inputs to hidden layer\nW1 = torch.randn(n_input, n_hidden)\n# Weights for hidden layer to output layer\nW2 = torch.randn(n_hidden, n_output)\n\n# and bias terms for hidden and output layers\nB1 = torch.randn((1, n_hidden))\nB2 = torch.randn((1, n_output))", "_____no_output_____" ] ], [ [ "> **Exercise:** Calculate the output for this multi-layer network using the weights `W1` & `W2`, and the biases, `B1` & `B2`. 
", "_____no_output_____" ] ], [ [ "## Your solution here\nh = activation(torch.mm(features,W1) + B1)\noutput = activation(torch.mm(h, W2) + B2)\nprint(output)", "tensor([[0.3171]])\n" ] ], [ [ "If you did this correctly, you should see the output `tensor([[ 0.3171]])`.\n\nThe number of hidden units is a parameter of the network, often called a **hyperparameter** to differentiate it from the weights and biases parameters. As you'll see later when we discuss training a neural network, the more hidden units a network has, and the more layers, the better able it is to learn from data and make accurate predictions.", "_____no_output_____" ], [ "## Numpy to Torch and back\n\nSpecial bonus section! PyTorch has a great feature for converting between Numpy arrays and Torch tensors. To create a tensor from a Numpy array, use `torch.from_numpy()`. To convert a tensor to a Numpy array, use the `.numpy()` method.", "_____no_output_____" ] ], [ [ "import numpy as np\na = np.random.rand(4,3)\na", "_____no_output_____" ], [ "b = torch.from_numpy(a)\nb", "_____no_output_____" ], [ "b.numpy()", "_____no_output_____" ] ], [ [ "The memory is shared between the Numpy array and Torch tensor, so if you change the values of one object in place, the other will change as well.", "_____no_output_____" ] ], [ [ "# Multiply PyTorch Tensor by 2, in place\nb.mul_(2)", "_____no_output_____" ], [ "# Numpy array matches new values from Tensor\na", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
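Annotation: the two-layer exercise in the record above can also be written with `torch.nn`, which bundles the weight and bias tensors into layer objects. A sketch of an equivalent architecture (freshly initialized parameters, so the output will not reproduce the `0.3171` above):

```python
import torch
from torch import nn

torch.manual_seed(7)
model = nn.Sequential(
    nn.Linear(3, 2),   # input -> hidden, playing the role of W1 and B1
    nn.Sigmoid(),
    nn.Linear(2, 1),   # hidden -> output, playing the role of W2 and B2
    nn.Sigmoid(),
)

features = torch.randn((1, 3))
print(model(features))  # a (1, 1) tensor of activations
```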
4a7d683d97df0a22c47ea6a7126c21123b9ee330
1,051
ipynb
Jupyter Notebook
Untitled0.ipynb
xuzhe79/opencv
bf2852db5edcb3645571ac7a703a29ecbed8a096
[ "BSD-3-Clause" ]
null
null
null
Untitled0.ipynb
xuzhe79/opencv
bf2852db5edcb3645571ac7a703a29ecbed8a096
[ "BSD-3-Clause" ]
null
null
null
Untitled0.ipynb
xuzhe79/opencv
bf2852db5edcb3645571ac7a703a29ecbed8a096
[ "BSD-3-Clause" ]
null
null
null
23.355556
222
0.488107
[ [ [ "<a href=\"https://colab.research.google.com/github/xuzhe79/opencv/blob/master/Untitled0.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import tensorflow as tf\n\nprint(tf.__version__)\n", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code" ] ]
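Annotation: beyond printing the version, a quick way to confirm that the runtime actually exposes an accelerator (a sketch assuming a TF 2.x runtime; the record's cell was never executed):

```python
import tensorflow as tf

print(tf.__version__)
# An empty list means no GPU is visible to TensorFlow.
print("GPUs visible:", tf.config.list_physical_devices('GPU'))
```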
4a7d6afeec364bbe79673072e219bbaa13320183
3,487
ipynb
Jupyter Notebook
src/weather_crawler.ipynb
NTUEE-325/MakeNTU
fd2608e35ceac7b4362fef57e1f2fc03daecb707
[ "MIT" ]
1
2022-03-20T05:42:47.000Z
2022-03-20T05:42:47.000Z
src/weather_crawler.ipynb
chungen04/MakeNTU2022
fd2608e35ceac7b4362fef57e1f2fc03daecb707
[ "MIT" ]
null
null
null
src/weather_crawler.ipynb
chungen04/MakeNTU2022
fd2608e35ceac7b4362fef57e1f2fc03daecb707
[ "MIT" ]
1
2022-03-20T05:42:53.000Z
2022-03-20T05:42:53.000Z
29.550847
139
0.50846
[ [ [ "# Reference: https://qiita.com/Cyber_Hacnosuke/items/122cec35d299c4d01f10\n# Same station lists as before\nplace_codeA = [14, 44, 81]\nplace_codeB = [47412, 47662, 47762]\nplace_name = [\"札幌\", \"東京\", \"下關\"] \n\nimport requests\nfrom bs4 import BeautifulSoup # If you haven't installed it yet, you can do so with pip.\nimport csv\n\n# The URL takes year/month/day settings, so we use %s placeholders to embed the specified values.\nbase_url = \"http://www.data.jma.go.jp/obd/stats/etrn/view/hourly_s1.php?prec_no=%s&block_no=%s&year=2022&month=2&day=%s&view=p1\"\n\n# Convert scraped values to float. (When data is missing, the JMA fills in \"/\", so convert that to 0.)\ndef str2float(s):\n try:\n return float(s)\n except (TypeError, ValueError):\n return 0.0\n\n\nif __name__ == \"__main__\":\n # Loop over the cities\n for place in place_name:\n # List that collects the final data (the commented value below was the first row, i.e. the header.)\n# All_list = [['Date', 'Sunshine duration (h)', 'Global solar radiation (MJ/m2)']]\n All_list = []\n print(place)\n index = place_name.index(place)\n # Loop over the days (the reference code looped 11 times over the years 2007-2017).\n for day in range(1,29):\n # Fill in the two station codes and the day.\n r = requests.get(base_url%(place_codeA[index], place_codeB[index], day))\n r.encoding = r.apparent_encoding\n\n # First, scrape the whole page\n soup = BeautifulSoup(r.text)\n # findAll extracts everything that matches the condition.\n # Here the condition is: tr tags whose class is \"mtx\".\n rows = soup.findAll('tr',class_='mtx')\n # print(rows[8])\n\n # The first rows of the table are column headers, so slice them off. (Indexing starts at 0.)\n # [Update] 2020/3/11: Apologies, this was wrong before.\n rows = rows[8:20]\n\n # Iterate over each remaining row and collect its data.\n for row in rows:\n # Now extract all the td elements inside the tr\n data = row.findAll('td')\n\n # Each row holds several values, so pull out the ones we need.\n # ★ Key point\n rowData = [] # initialize\n rowData.append(str(2022) + \"/\" + str(2) + \"/\" + str(day) + \" \" + str(data[0].string))\n rowData.append(str2float(data[10].string))\n rowData.append(str2float(data[11].string))\n\n # Append this row's data\n All_list.append(rowData)\n\n # Write each city's data to a newly created file (CSV format, named after the city).\n with open(place + '.csv', 'w', encoding='utf-8') as file:\n writer = csv.writer(file, lineterminator='\\n')\n writer.writerows(All_list)", "札幌\n東京\n下關\n" ] ] ]
[ "code" ]
[ [ "code" ] ]
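Annotation: the crawler above fires 28 back-to-back requests per city. A politer variant (a sketch; `base_url`, `place_codeA`, and `place_codeB` as defined in the record) waits between requests and retries on non-200 responses:

```python
import time
import requests

def fetch(url, pause=1.0, retries=3):
    # Be gentle with the JMA server: pause between requests, back off on failure.
    resp = None
    for attempt in range(retries):
        resp = requests.get(url)
        if resp.status_code == 200:
            time.sleep(pause)              # fixed delay before the next request
            return resp
        time.sleep(pause * (attempt + 1))  # simple linear backoff
    resp.raise_for_status()                # surface the last error

# Hypothetical drop-in inside the day loop:
# r = fetch(base_url % (place_codeA[index], place_codeB[index], day))
```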
4a7d71d7dde57836666151761d7cadaad6bf179c
76,303
ipynb
Jupyter Notebook
Feature Engineering - Titanic Case Study/Titanic-PlotCategoricalFeatures.ipynb
behzad1195/Data-Prepration-in-Python
81833d408bd886ab7a574ffdf880ca64a4b12f11
[ "MIT" ]
1
2021-08-18T14:47:10.000Z
2021-08-18T14:47:10.000Z
Feature Engineering - Titanic Case Study/Titanic-PlotCategoricalFeatures.ipynb
behzad1195/Data-Prepration-in-Python
81833d408bd886ab7a574ffdf880ca64a4b12f11
[ "MIT" ]
null
null
null
Feature Engineering - Titanic Case Study/Titanic-PlotCategoricalFeatures.ipynb
behzad1195/Data-Prepration-in-Python
81833d408bd886ab7a574ffdf880ca64a4b12f11
[ "MIT" ]
null
null
null
172.631222
21,900
0.872312
[ [ [ "## Explore The Data: Plot Categorical Features\n\nUsing the Titanic dataset from [this](https://www.kaggle.com/c/titanic/overview) Kaggle competition.\n\nThis dataset contains information about 891 people who were on board the ship when it sank on April 15th, 1912. As noted in the description on Kaggle's website, some people aboard the ship were more likely to survive the wreck than others. There were not enough lifeboats for everybody so women, children, and the upper-class were prioritized. Using the information about these 891 passengers, the challenge is to build a model to predict which people would survive based on the following fields:\n\n- **Name** (str) - Name of the passenger\n- **Pclass** (int) - Ticket class (1st, 2nd, or 3rd)\n- **Sex** (str) - Gender of the passenger\n- **Age** (float) - Age in years\n- **SibSp** (int) - Number of siblings and spouses aboard\n- **Parch** (int) - Number of parents and children aboard\n- **Ticket** (str) - Ticket number\n- **Fare** (float) - Passenger fare\n- **Cabin** (str) - Cabin number\n- **Embarked** (str) - Port of embarkation (C = Cherbourg, Q = Queenstown, S = Southampton)\n\n**This section focuses on exploring the `Name`, `Sex`, `Cabin`, and `Embarked` features.**", "_____no_output_____" ], [ "### Read In Data", "_____no_output_____" ] ], [ [ "# Read in our data\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline\nimport numpy as np\nimport pandas as pd\n\ntitanic = pd.read_csv('titanic.csv',\n usecols=['Survived', 'Name', 'Sex', 'Cabin', 'Embarked'])\ntitanic.head()", "_____no_output_____" ] ], [ [ "### Plot Categorical Features", "_____no_output_____" ] ], [ [ "# Create a title feature by parsing passenger name and create a cabin indicator variable\ntitanic['Title_Raw'] = titanic['Name'].apply(lambda x: x.split(',')[1].split('.')[0].strip())\ntitanic['Title'] = titanic['Title_Raw'].apply(lambda x: x if x in ['Master', 'Miss', 'Mr', 'Mrs'] else 'Other')\ntitanic['Cabin_ind'] = np.where(titanic['Cabin'].isnull(), 0, 1)\ntitanic.head()", "_____no_output_____" ] ], [ [ "* We built the 'Title' column purely for visualization: as we saw, the only title groups with both a strong relationship to survival and a large enough count are Mr, Miss, Mrs, and Master, so everything else is bucketed as 'Other'.\n* The same reasoning applies to cabin: the indicator captures the strong difference in survival rate between passengers with and without a recorded cabin.", "_____no_output_____" ] ], [ [ "# Generate categorical plots for features\nfor col in ['Title', 'Sex', 'Cabin_ind', 'Embarked']:\n sns.catplot(x=col, y='Survived', data=titanic, kind='point', aspect=2)\n plt.ylim(0, 1)", "_____no_output_____" ], [ "# Split embarked by whether the passenger had a cabin\ntitanic.pivot_table('Survived', index='Cabin_ind', columns='Embarked', aggfunc='count')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
4a7d74f028228593ceb207a27b9198c9e29dacbb
15,249
ipynb
Jupyter Notebook
projects/introduction-to-pytorch/2-tensors.ipynb
Code360In/data-science
cb6093e898ccb860e76914057a52f751d6b8a4d2
[ "MIT" ]
2
2020-02-04T20:10:26.000Z
2021-04-01T08:38:55.000Z
projects/introduction-to-pytorch/2-tensors.ipynb
Code360In/data-science
cb6093e898ccb860e76914057a52f751d6b8a4d2
[ "MIT" ]
null
null
null
projects/introduction-to-pytorch/2-tensors.ipynb
Code360In/data-science
cb6093e898ccb860e76914057a52f751d6b8a4d2
[ "MIT" ]
5
2021-04-01T08:40:22.000Z
2022-03-25T03:52:36.000Z
28.343866
499
0.437143
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "# Tensors\n\nTensors are a specialized data structure that are very similar to arrays and matrices. \nIn PyTorch, we use tensors to encode the inputs and outputs of a model, as well as the model’s parameters.\n\nTensors are similar to [NumPy’s](https://numpy.org/) ndarrays, except that tensors can run on GPUs or other hardware accelerators. In fact, tensors and NumPy arrays can often share the same underlying memory, eliminating the need to copy data (see `bridge-to-np-label`). Tensors are also optimized for automatic differentiation (we'll see more about that later in the Autograd unit). If you’re familiar with `ndarrays`, you’ll be right at home with the Tensor API. If not, follow along!\n\nLet's start by setting up our environment.\n\n", "_____no_output_____" ] ], [ [ "import torch\nimport numpy as np", "_____no_output_____" ] ], [ [ "# Initializing a Tensor\n\nTensors can be initialized in various ways. Take a look at the following examples:\n\n## Directly from data\n\nTensors can be created directly from data. The data type is automatically inferred.\n\n", "_____no_output_____" ] ], [ [ "data = [[1, 2],[3, 4]]\nx_data = torch.tensor(data)", "_____no_output_____" ] ], [ [ "## From a NumPy array\n\nTensors can be created from NumPy arrays (and vice versa - see `bridge-to-np-label`).\n\n", "_____no_output_____" ] ], [ [ "np_array = np.array(data)\nx_np = torch.from_numpy(np_array)", "_____no_output_____" ] ], [ [ "## From another tensor:\n\nThe new tensor retains the properties (shape, data type) of the argument tensor, unless explicitly overridden.\n\n", "_____no_output_____" ] ], [ [ "x_ones = torch.ones_like(x_data) # retains the properties of x_data\nprint(f\"Ones Tensor: \\n {x_ones} \\n\")\n\nx_rand = torch.rand_like(x_data, dtype=torch.float) # overrides the datatype of x_data\nprint(f\"Random Tensor: \\n {x_rand} \\n\")", "Ones Tensor: \n tensor([[1, 1],\n [1, 1]]) \n\nRandom Tensor: \n tensor([[0.7509, 0.8505],\n [0.5897, 0.1789]]) \n\n" ] ], [ [ "## With random or constant values:\n\n``shape`` is a tuple of tensor dimensions. 
In the functions below, it determines the dimensionality of the output tensor.\n\n", "_____no_output_____" ] ], [ [ "shape = (2,3,)\nrand_tensor = torch.rand(shape)\nones_tensor = torch.ones(shape)\nzeros_tensor = torch.zeros(shape)\n\nprint(f\"Random Tensor: \\n {rand_tensor} \\n\")\nprint(f\"Ones Tensor: \\n {ones_tensor} \\n\")\nprint(f\"Zeros Tensor: \\n {zeros_tensor}\")", "Random Tensor: \n tensor([[0.2260, 0.4615, 0.1648],\n [0.6388, 0.9319, 0.6268]]) \n\nOnes Tensor: \n tensor([[1., 1., 1.],\n [1., 1., 1.]]) \n\nZeros Tensor: \n tensor([[0., 0., 0.],\n [0., 0., 0.]])\n" ] ], [ [ "# Attributes of a Tensor\n\nTensor attributes describe their shape, data type, and the device on which they are stored.\n\n", "_____no_output_____" ] ], [ [ "tensor = torch.rand(3,4)\n\nprint(f\"Shape of tensor: {tensor.shape}\")\nprint(f\"Datatype of tensor: {tensor.dtype}\")\nprint(f\"Device tensor is stored on: {tensor.device}\")", "Shape of tensor: torch.Size([3, 4])\nDatatype of tensor: torch.float32\nDevice tensor is stored on: cpu\n" ] ], [ [ "# Operations on Tensors\n\nOver 100 tensor operations, including arithmetic, linear algebra, matrix manipulation (transposing, \nindexing, slicing), sampling and more are\ncomprehensively described [here](https://pytorch.org/docs/stable/torch.html).\n\nEach of these operations can be run on the GPU (at typically higher speeds than on a\nCPU).\n\nBy default, tensors are created on the CPU. We need to explicitly move tensors to the GPU using \n`.to` method (after checking for GPU availability). Keep in mind that copying large tensors\nacross devices can be expensive in terms of time and memory!\n\n", "_____no_output_____" ] ], [ [ "# We move our tensor to the GPU if available\nif torch.cuda.is_available():\n tensor = tensor.to('cuda')", "_____no_output_____" ] ], [ [ "Try out some of the operations from the list.\nIf you're familiar with the NumPy API, you'll find the Tensor API a breeze to use.\n\n## Standard numpy-like indexing and slicing:", "_____no_output_____" ] ], [ [ "tensor = torch.ones(4, 4)\nprint('First row: ',tensor[0])\nprint('First column: ', tensor[:, 0])\nprint('Last column:', tensor[..., -1])\ntensor[:,1] = 0\nprint(tensor)", "First row: tensor([1., 1., 1., 1.])\nFirst column: tensor([1., 1., 1., 1.])\nLast column: tensor([1., 1., 1., 1.])\ntensor([[1., 0., 1., 1.],\n [1., 0., 1., 1.],\n [1., 0., 1., 1.],\n [1., 0., 1., 1.]])\n" ] ], [ [ "## Joining tensors\nYou can use `torch.cat` to concatenate a sequence of tensors along a given dimension.\nSee also [torch.stack](https://pytorch.org/docs/stable/generated/torch.stack.html),\nanother tensor joining op that is subtly different from ``torch.cat``.\n\n", "_____no_output_____" ] ], [ [ "t1 = torch.cat([tensor, tensor, tensor], dim=1)\nprint(t1)", "tensor([[1., 0., 1., 1., 1., 0., 1., 1., 1., 0., 1., 1.],\n [1., 0., 1., 1., 1., 0., 1., 1., 1., 0., 1., 1.],\n [1., 0., 1., 1., 1., 0., 1., 1., 1., 0., 1., 1.],\n [1., 0., 1., 1., 1., 0., 1., 1., 1., 0., 1., 1.]])\n" ] ], [ [ "## Arithmetic operations\n\n", "_____no_output_____" ] ], [ [ "# This computes the matrix multiplication between two tensors. y1, y2, y3 will have the same value\ny1 = tensor @ tensor.T\ny2 = tensor.matmul(tensor.T)\n\ny3 = torch.rand_like(tensor)\ntorch.matmul(tensor, tensor.T, out=y3)\n\n\n# This computes the element-wise product. 
z1, z2, z3 will have the same value\nz1 = tensor * tensor\nz2 = tensor.mul(tensor)\n\nz3 = torch.rand_like(tensor)\ntorch.mul(tensor, tensor, out=z3)", "_____no_output_____" ] ], [ [ "## Single-element tensors\nIf you have a one-element tensor, for example by aggregating all\nvalues of a tensor into one value, you can convert it to a Python\nnumerical value using `item()`:\n\n", "_____no_output_____" ] ], [ [ "agg = tensor.sum()\nagg_item = agg.item() \nprint(agg_item, type(agg_item))", "12.0 <class 'float'>\n" ] ], [ [ "## In-place operations\nOperations that store the result into the operand are called in-place. They are denoted by a ``_`` suffix. \nFor example: ``x.copy_(y)``, ``x.t_()``, will change ``x``.\n\n> **Note:** In-place operations save some memory, but can be problematic when computing derivatives because of an immediate loss of history. Hence, their use is discouraged.\n\n", "_____no_output_____" ] ], [ [ "print(tensor, \"\\n\")\ntensor.add_(5)\nprint(tensor)", "tensor([[1., 0., 1., 1.],\n [1., 0., 1., 1.],\n [1., 0., 1., 1.],\n [1., 0., 1., 1.]]) \n\ntensor([[6., 5., 6., 6.],\n [6., 5., 6., 6.],\n [6., 5., 6., 6.],\n [6., 5., 6., 6.]])\n" ] ], [ [ "## Bridge with NumPy\n\nTensors on the CPU and NumPy arrays can share their underlying memory\nlocations, and changing one will change\tthe other.\n\n### Tensor to NumPy array", "_____no_output_____" ] ], [ [ "t = torch.ones(5)\nprint(f\"t: {t}\")\nn = t.numpy()\nprint(f\"n: {n}\")", "t: tensor([1., 1., 1., 1., 1.])\nn: [1. 1. 1. 1. 1.]\n" ] ], [ [ "A change in the tensor reflects in the NumPy array.\n\n", "_____no_output_____" ] ], [ [ "t.add_(1)\nprint(f\"t: {t}\")\nprint(f\"n: {n}\")", "t: tensor([2., 2., 2., 2., 2.])\nn: [2. 2. 2. 2. 2.]\n" ] ], [ [ "### NumPy array to Tensor", "_____no_output_____" ] ], [ [ "n = np.ones(5)\nt = torch.from_numpy(n)", "_____no_output_____" ] ], [ [ "Changes in the NumPy array reflects in the tensor.\n\n", "_____no_output_____" ] ], [ [ "np.add(n, 1, out=n)\nprint(f\"t: {t}\")\nprint(f\"n: {n}\")", "t: tensor([2., 2., 2., 2., 2.], dtype=torch.float64)\nn: [2. 2. 2. 2. 2.]\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a7d919651ae3dd3dd7d7517cb828dd0b3656268
18,123
ipynb
Jupyter Notebook
notebook/04/48 Renaming and Reordering multiple Spark Data Frame Columns.ipynb
SatoruItaya/databricks-certification-prepare
f0aeb9c6aef5b3aa70f8553e675397b9b36fe241
[ "Apache-2.0" ]
null
null
null
notebook/04/48 Renaming and Reordering multiple Spark Data Frame Columns.ipynb
SatoruItaya/databricks-certification-prepare
f0aeb9c6aef5b3aa70f8553e675397b9b36fe241
[ "Apache-2.0" ]
null
null
null
notebook/04/48 Renaming and Reordering multiple Spark Data Frame Columns.ipynb
SatoruItaya/databricks-certification-prepare
f0aeb9c6aef5b3aa70f8553e675397b9b36fe241
[ "Apache-2.0" ]
null
null
null
9,061.5
18,122
0.556199
[ [ [ "%run \"./02 Creating Spark Data Frame to Select and Rename Columns\"", "_____no_output_____" ], [ "# required columns from original list\nrequired_columns = ['id', 'first_name', 'last_name', 'email', 'phone_numbers', 'courses']\n\n# new column name list\ntarget_column_names = ['user_id', 'user_first_name', 'user_last_name', 'user_email', 'user_phone_numbers', 'enrolled_courses']", "_____no_output_____" ] ], [ [ "* Get the data from required columns and rename the columns to new names as per target column names.\n* We should be able to use `select` to get the data from required columns.\n* We should be able to rename the columns using `toDF`\n* `select` and `toDF` takes variable number of arguments. We can use `*required_columns` while invoking `select` to get the data from required columns. It is applicable for `toDF` as well.", "_____no_output_____" ] ], [ [ "help(users_df.toDF)", "_____no_output_____" ], [ "users_df. \\\n select(required_columns). \\\n show()", "_____no_output_____" ], [ "users_df. \\\n select(required_columns). \\\n toDF(*target_column_names). \\\n show()", "_____no_output_____" ], [ "def myDF(*cols):\n print(type(cols))\n print(cols)", "_____no_output_____" ], [ "myDF(*['f1', 'f2'])", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
4a7d9317d2c23fd7236ae15152d2d25ee1b536ea
767,370
ipynb
Jupyter Notebook
Course4:Convolutional_Neural_Networks/Week4:Special_applications:Face_recognition&Neural_style_transfer/Neural Style Transfer/Art Generation with Neural Style Transfer - v2.ipynb
starFalll/deeplearning.ai-learning-notebook
60be627c998c625bf493aab08c34cbc360241d01
[ "Apache-2.0" ]
null
null
null
Course4:Convolutional_Neural_Networks/Week4:Special_applications:Face_recognition&Neural_style_transfer/Neural Style Transfer/Art Generation with Neural Style Transfer - v2.ipynb
starFalll/deeplearning.ai-learning-notebook
60be627c998c625bf493aab08c34cbc360241d01
[ "Apache-2.0" ]
null
null
null
Course4:Convolutional_Neural_Networks/Week4:Special_applications:Face_recognition&Neural_style_transfer/Neural Style Transfer/Art Generation with Neural Style Transfer - v2.ipynb
starFalll/deeplearning.ai-learning-notebook
60be627c998c625bf493aab08c34cbc360241d01
[ "Apache-2.0" ]
null
null
null
569.688196
309,334
0.929138
[ [ [ "# Deep Learning & Art: Neural Style Transfer\n\nWelcome to the second assignment of this week. In this assignment, you will learn about Neural Style Transfer. This algorithm was created by Gatys et al. (2015) (https://arxiv.org/abs/1508.06576). \n\n**In this assignment, you will:**\n- Implement the neural style transfer algorithm \n- Generate novel artistic images using your algorithm \n\nMost of the algorithms you've studied optimize a cost function to get a set of parameter values. In Neural Style Transfer, you'll optimize a cost function to get pixel values!", "_____no_output_____" ] ], [ [ "import os\nimport sys\nimport scipy.io\nimport scipy.misc\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import imshow\nfrom PIL import Image\nfrom nst_utils import *\nimport numpy as np\nimport tensorflow as tf\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## 1 - Problem Statement\n\nNeural Style Transfer (NST) is one of the most fun techniques in deep learning. As seen below, it merges two images, namely, a \"content\" image (C) and a \"style\" image (S), to create a \"generated\" image (G). The generated image G combines the \"content\" of the image C with the \"style\" of image S. \n\nIn this example, you are going to generate an image of the Louvre museum in Paris (content image C), mixed with a painting by Claude Monet, a leader of the impressionist movement (style image S).\n<img src=\"images/louvre_generated.png\" style=\"width:750px;height:200px;\">\n\nLet's see how you can do this. ", "_____no_output_____" ], [ "## 2 - Transfer Learning\n\nNeural Style Transfer (NST) uses a previously trained convolutional network, and builds on top of that. The idea of using a network trained on a different task and applying it to a new task is called transfer learning. \n\nFollowing the original NST paper (https://arxiv.org/abs/1508.06576), we will use the VGG network. Specifically, we'll use VGG-19, a 19-layer version of the VGG network. This model has already been trained on the very large ImageNet database, and thus has learned to recognize a variety of low level features (at the earlier layers) and high level features (at the deeper layers). \n\nRun the following code to load parameters from the VGG model. This may take a few seconds. 
", "_____no_output_____" ] ], [ [ "model = load_vgg_model(\"pretrained-model/imagenet-vgg-verydeep-19.mat\")\nprint(model)", "{'input': <tf.Variable 'Variable:0' shape=(1, 300, 400, 3) dtype=float32_ref>, 'conv1_1': <tf.Tensor 'Relu:0' shape=(1, 300, 400, 64) dtype=float32>, 'conv1_2': <tf.Tensor 'Relu_1:0' shape=(1, 300, 400, 64) dtype=float32>, 'avgpool1': <tf.Tensor 'AvgPool:0' shape=(1, 150, 200, 64) dtype=float32>, 'conv2_1': <tf.Tensor 'Relu_2:0' shape=(1, 150, 200, 128) dtype=float32>, 'conv2_2': <tf.Tensor 'Relu_3:0' shape=(1, 150, 200, 128) dtype=float32>, 'avgpool2': <tf.Tensor 'AvgPool_1:0' shape=(1, 75, 100, 128) dtype=float32>, 'conv3_1': <tf.Tensor 'Relu_4:0' shape=(1, 75, 100, 256) dtype=float32>, 'conv3_2': <tf.Tensor 'Relu_5:0' shape=(1, 75, 100, 256) dtype=float32>, 'conv3_3': <tf.Tensor 'Relu_6:0' shape=(1, 75, 100, 256) dtype=float32>, 'conv3_4': <tf.Tensor 'Relu_7:0' shape=(1, 75, 100, 256) dtype=float32>, 'avgpool3': <tf.Tensor 'AvgPool_2:0' shape=(1, 38, 50, 256) dtype=float32>, 'conv4_1': <tf.Tensor 'Relu_8:0' shape=(1, 38, 50, 512) dtype=float32>, 'conv4_2': <tf.Tensor 'Relu_9:0' shape=(1, 38, 50, 512) dtype=float32>, 'conv4_3': <tf.Tensor 'Relu_10:0' shape=(1, 38, 50, 512) dtype=float32>, 'conv4_4': <tf.Tensor 'Relu_11:0' shape=(1, 38, 50, 512) dtype=float32>, 'avgpool4': <tf.Tensor 'AvgPool_3:0' shape=(1, 19, 25, 512) dtype=float32>, 'conv5_1': <tf.Tensor 'Relu_12:0' shape=(1, 19, 25, 512) dtype=float32>, 'conv5_2': <tf.Tensor 'Relu_13:0' shape=(1, 19, 25, 512) dtype=float32>, 'conv5_3': <tf.Tensor 'Relu_14:0' shape=(1, 19, 25, 512) dtype=float32>, 'conv5_4': <tf.Tensor 'Relu_15:0' shape=(1, 19, 25, 512) dtype=float32>, 'avgpool5': <tf.Tensor 'AvgPool_4:0' shape=(1, 10, 13, 512) dtype=float32>}\n" ] ], [ [ "The model is stored in a python dictionary where each variable name is the key and the corresponding value is a tensor containing that variable's value. To run an image through this network, you just have to feed the image to the model. In TensorFlow, you can do so using the [tf.assign](https://www.tensorflow.org/api_docs/python/tf/assign) function. In particular, you will use the assign function like this: \n```python\nmodel[\"input\"].assign(image)\n```\nThis assigns the image as an input to the model. After this, if you want to access the activations of a particular layer, say layer `4_2` when the network is run on this image, you would run a TensorFlow session on the correct tensor `conv4_2`, as follows: \n```python\nsess.run(model[\"conv4_2\"])\n```", "_____no_output_____" ], [ "## 3 - Neural Style Transfer \n\nWe will build the NST algorithm in three steps:\n\n- Build the content cost function $J_{content}(C,G)$\n- Build the style cost function $J_{style}(S,G)$\n- Put it together to get $J(G) = \\alpha J_{content}(C,G) + \\beta J_{style}(S,G)$. \n\n### 3.1 - Computing the content cost\n\nIn our running example, the content image C will be the picture of the Louvre Museum in Paris. 
Run the code below to see a picture of the Louvre.", "_____no_output_____" ] ], [ [ "content_image = scipy.misc.imread(\"images/louvre.jpg\")\nimshow(content_image)", "_____no_output_____" ] ], [ [ "The content image (C) shows the Louvre museum's pyramid surrounded by old Paris buildings, against a sunny sky with a few clouds.\n\n** 3.1.1 - How do you ensure the generated image G matches the content of the image C?**\n\nAs we saw in lecture, the earlier (shallower) layers of a ConvNet tend to detect lower-level features such as edges and simple textures, and the later (deeper) layers tend to detect higher-level features such as more complex textures as well as object classes. \n\nWe would like the \"generated\" image G to have similar content as the input image C. Suppose you have chosen some layer's activations to represent the content of an image. In practice, you'll get the most visually pleasing results if you choose a layer in the middle of the network--neither too shallow nor too deep. (After you have finished this exercise, feel free to come back and experiment with using different layers, to see how the results vary.)\n\nSo, suppose you have picked one particular hidden layer to use. Now, set the image C as the input to the pretrained VGG network, and run forward propagation. Let $a^{(C)}$ be the hidden layer activations in the layer you had chosen. (In lecture, we had written this as $a^{[l](C)}$, but here we'll drop the superscript $[l]$ to simplify the notation.) This will be a $n_H \\times n_W \\times n_C$ tensor. Repeat this process with the image G: Set G as the input, and run forward progation. Let $$a^{(G)}$$ be the corresponding hidden layer activation. We will define as the content cost function as:\n\n$$J_{content}(C,G) = \\frac{1}{4 \\times n_H \\times n_W \\times n_C}\\sum _{ \\text{all entries}} (a^{(C)} - a^{(G)})^2\\tag{1} $$\n\nHere, $n_H, n_W$ and $n_C$ are the height, width and number of channels of the hidden layer you have chosen, and appear in a normalization term in the cost. For clarity, note that $a^{(C)}$ and $a^{(G)}$ are the volumes corresponding to a hidden layer's activations. In order to compute the cost $J_{content}(C,G)$, it might also be convenient to unroll these 3D volumes into a 2D matrix, as shown below. (Technically this unrolling step isn't needed to compute $J_{content}$, but it will be good practice for when you do need to carry out a similar operation later for computing the style const $J_{style}$.)\n\n<img src=\"images/NST_LOSS.png\" style=\"width:800px;height:400px;\">\n\n**Exercise:** Compute the \"content cost\" using TensorFlow. \n\n**Instructions**: The 3 steps to implement this function are:\n1. Retrieve dimensions from a_G: \n - To retrieve dimensions from a tensor X, use: `X.get_shape().as_list()`\n2. Unroll a_C and a_G as explained in the picture above\n - If you are stuck, take a look at [Hint1](https://www.tensorflow.org/versions/r1.3/api_docs/python/tf/transpose) and [Hint2](https://www.tensorflow.org/versions/r1.2/api_docs/python/tf/reshape).\n3. 
Compute the content cost:\n - If you are stuck, take a look at [Hint3](https://www.tensorflow.org/api_docs/python/tf/reduce_sum), [Hint4](https://www.tensorflow.org/api_docs/python/tf/square) and [Hint5](https://www.tensorflow.org/api_docs/python/tf/subtract).", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: compute_content_cost\n\ndef compute_content_cost(a_C, a_G):\n \"\"\"\n Computes the content cost\n \n Arguments:\n a_C -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing content of the image C \n a_G -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing content of the image G\n \n Returns: \n J_content -- scalar that you compute using equation 1 above.\n \"\"\"\n \n ### START CODE HERE ###\n # Retrieve dimensions from a_G (≈1 line)\n m, n_H, n_W, n_C = tf.convert_to_tensor(a_C, dtype=tf.float32).get_shape().as_list()\n \n # Reshape a_C and a_G (≈2 lines)\n a_C_unrolled = tf.reshape(a_C,[m,-1,n_C])\n a_G_unrolled = tf.reshape(a_G,[m,-1,n_C])\n \n # compute the cost with tensorflow (≈1 line)\n J_content = tf.reduce_sum(tf.square(tf.subtract(a_C_unrolled, a_G_unrolled)))*(1 / (4 * n_H * n_W * n_C))\n ### END CODE HERE ###\n \n return J_content", "_____no_output_____" ], [ "tf.reset_default_graph()\n\nwith tf.Session() as test:\n tf.set_random_seed(1)\n a_C = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)\n a_G = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)\n J_content = compute_content_cost(a_C, a_G)\n print(\"J_content = \" + str(J_content.eval()))", "J_content = 6.76559\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **J_content**\n </td>\n <td>\n 6.76559\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "<font color='blue'>\n**What you should remember**:\n- The content cost takes a hidden layer activation of the neural network, and measures how different $a^{(C)}$ and $a^{(G)}$ are. \n- When we minimize the content cost later, this will help make sure $G$ has similar content as $C$.", "_____no_output_____" ], [ "### 3.2 - Computing the style cost\n\nFor our running example, we will use the following style image: ", "_____no_output_____" ] ], [ [ "style_image = scipy.misc.imread(\"images/monet_800600.jpg\")\nimshow(style_image)", "_____no_output_____" ] ], [ [ "This painting was painted in the style of *[impressionism](https://en.wikipedia.org/wiki/Impressionism)*.\n\nLets see how you can now define a \"style\" const function $J_{style}(S,G)$. ", "_____no_output_____" ], [ "### 3.2.1 - Style matrix\n\nThe style matrix is also called a \"Gram matrix.\" In linear algebra, the Gram matrix G of a set of vectors $(v_{1},\\dots ,v_{n})$ is the matrix of dot products, whose entries are ${\\displaystyle G_{ij} = v_{i}^T v_{j} = np.dot(v_{i}, v_{j}) }$. In other words, $G_{ij}$ compares how similar $v_i$ is to $v_j$: If they are highly similar, you would expect them to have a large dot product, and thus for $G_{ij}$ to be large. \n\nNote that there is an unfortunate collision in the variable names used here. We are following common terminology used in the literature, but $G$ is used to denote the Style matrix (or Gram matrix) as well as to denote the generated image $G$. We will try to make sure which $G$ we are referring to is always clear from the context. 
\n\nIn NST, you can compute the Style matrix by multiplying the \"unrolled\" filter matrix with their transpose:\n\n<img src=\"images/NST_GM.png\" style=\"width:900px;height:300px;\">\n\nThe result is a matrix of dimension $(n_C,n_C)$ where $n_C$ is the number of filters. The value $G_{ij}$ measures how similar the activations of filter $i$ are to the activations of filter $j$. \n\nOne important part of the gram matrix is that the diagonal elements such as $G_{ii}$ also measures how active filter $i$ is. For example, suppose filter $i$ is detecting vertical textures in the image. Then $G_{ii}$ measures how common vertical textures are in the image as a whole: If $G_{ii}$ is large, this means that the image has a lot of vertical texture. \n\nBy capturing the prevalence of different types of features ($G_{ii}$), as well as how much different features occur together ($G_{ij}$), the Style matrix $G$ measures the style of an image. \n\n**Exercise**:\nUsing TensorFlow, implement a function that computes the Gram matrix of a matrix A. The formula is: The gram matrix of A is $G_A = AA^T$. If you are stuck, take a look at [Hint 1](https://www.tensorflow.org/api_docs/python/tf/matmul) and [Hint 2](https://www.tensorflow.org/api_docs/python/tf/transpose).", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: gram_matrix\n\ndef gram_matrix(A):\n \"\"\"\n Argument:\n A -- matrix of shape (n_C, n_H*n_W)\n \n Returns:\n GA -- Gram matrix of A, of shape (n_C, n_C)\n \"\"\"\n \n ### START CODE HERE ### (≈1 line)\n GA = tf.matmul(A,tf.transpose(A))\n ### END CODE HERE ###\n \n return GA", "_____no_output_____" ], [ "tf.reset_default_graph()\n\nwith tf.Session() as test:\n tf.set_random_seed(1)\n A = tf.random_normal([3, 2*1], mean=1, stddev=4)\n GA = gram_matrix(A)\n \n print(\"GA = \" + str(GA.eval()))", "GA = [[ 6.42230511 -4.42912197 -2.09668207]\n [ -4.42912197 19.46583748 19.56387138]\n [ -2.09668207 19.56387138 20.6864624 ]]\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **GA**\n </td>\n <td>\n [[ 6.42230511 -4.42912197 -2.09668207] <br>\n [ -4.42912197 19.46583748 19.56387138] <br>\n [ -2.09668207 19.56387138 20.6864624 ]]\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "### 3.2.2 - Style cost", "_____no_output_____" ], [ "After generating the Style matrix (Gram matrix), your goal will be to minimize the distance between the Gram matrix of the \"style\" image S and that of the \"generated\" image G. For now, we are using only a single hidden layer $a^{[l]}$, and the corresponding style cost for this layer is defined as: \n\n$$J_{style}^{[l]}(S,G) = \\frac{1}{4 \\times {n_C}^2 \\times (n_H \\times n_W)^2} \\sum _{i=1}^{n_C}\\sum_{j=1}^{n_C}(G^{(S)}_{ij} - G^{(G)}_{ij})^2\\tag{2} $$\n\nwhere $G^{(S)}$ and $G^{(G)}$ are respectively the Gram matrices of the \"style\" image and the \"generated\" image, computed using the hidden layer activations for a particular hidden layer in the network. \n", "_____no_output_____" ], [ "**Exercise**: Compute the style cost for a single layer. \n\n**Instructions**: The 3 steps to implement this function are:\n1. Retrieve dimensions from the hidden layer activations a_G: \n - To retrieve dimensions from a tensor X, use: `X.get_shape().as_list()`\n2. Unroll the hidden layer activations a_S and a_G into 2D matrices, as explained in the picture above.\n - You may find [Hint1](https://www.tensorflow.org/versions/r1.3/api_docs/python/tf/transpose) and [Hint2](https://www.tensorflow.org/versions/r1.2/api_docs/python/tf/reshape) useful.\n3. 
Compute the Style matrix of the images S and G. (Use the function you had previously written.) \n4. Compute the Style cost:\n - You may find [Hint3](https://www.tensorflow.org/api_docs/python/tf/reduce_sum), [Hint4](https://www.tensorflow.org/api_docs/python/tf/square) and [Hint5](https://www.tensorflow.org/api_docs/python/tf/subtract) useful.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: compute_layer_style_cost\n\ndef compute_layer_style_cost(a_S, a_G):\n \"\"\"\n Arguments:\n a_S -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing style of the image S \n a_G -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing style of the image G\n \n Returns: \n J_style_layer -- tensor representing a scalar value, style cost defined above by equation (2)\n \"\"\"\n \n ### START CODE HERE ###\n # Retrieve dimensions from a_G (≈1 line)\n m, n_H, n_W, n_C = tf.convert_to_tensor(a_S, dtype=tf.float32).get_shape().as_list()\n \n # Reshape the images to have them of shape (n_C, n_H*n_W) (≈2 lines)\n a_S = tf.transpose(tf.reshape(a_S, [n_H*n_W, n_C]))\n a_G = tf.transpose(tf.reshape(a_G, [n_H*n_W, n_C]))\n\n # Computing gram_matrices for both images S and G (≈2 lines)\n GS = gram_matrix(a_S)\n GG = gram_matrix(a_G)\n\n # Computing the loss (≈1 line)\n J_style_layer = tf.reduce_sum(tf.square(tf.subtract(GS, GG))) * (1 / (4 * n_C **2 * (n_H * n_H)**2))\n \n ### END CODE HERE ###\n \n return J_style_layer", "_____no_output_____" ], [ "tf.reset_default_graph()\n\nwith tf.Session() as test:\n tf.set_random_seed(1)\n a_S = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)\n a_G = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)\n J_style_layer = compute_layer_style_cost(a_S, a_G)\n \n print(\"J_style_layer = \" + str(J_style_layer.eval()))", "J_style_layer = 9.19028\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **J_style_layer**\n </td>\n <td>\n 9.19028\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "### 3.2.3 Style Weights\n\nSo far you have captured the style from only one layer. We'll get better results if we \"merge\" style costs from several different layers. After completing this exercise, feel free to come back and experiment with different weights to see how it changes the generated image $G$. But for now, this is a pretty reasonable default: ", "_____no_output_____" ] ], [ [ "STYLE_LAYERS = [\n ('conv1_1', 0.2),\n ('conv2_1', 0.2),\n ('conv3_1', 0.2),\n ('conv4_1', 0.2),\n ('conv5_1', 0.2)]", "_____no_output_____" ] ], [ [ "You can combine the style costs for different layers as follows:\n\n$$J_{style}(S,G) = \\sum_{l} \\lambda^{[l]} J^{[l]}_{style}(S,G)$$\n\nwhere the values for $\\lambda^{[l]}$ are given in `STYLE_LAYERS`. \n", "_____no_output_____" ], [ "We've implemented a compute_style_cost(...) function. It simply calls your `compute_layer_style_cost(...)` several times, and weights their results using the values in `STYLE_LAYERS`. Read over it to make sure you understand what it's doing. \n\n<!-- \n2. Loop over (layer_name, coeff) from STYLE_LAYERS:\n a. Select the output tensor of the current layer. As an example, to call the tensor from the \"conv1_1\" layer you would do: out = model[\"conv1_1\"]\n b. Get the style of the style image from the current layer by running the session on the tensor \"out\"\n c. Get a tensor representing the style of the generated image from the current layer. It is just \"out\".\n d. Now that you have both styles. 
Use the function you've implemented above to compute the style_cost for the current layer\n e. Add (style_cost x coeff) of the current layer to overall style cost (J_style)\n3. Return J_style, which should now be the sum of the (style_cost x coeff) for each layer.\n!--> \n", "_____no_output_____" ] ], [ [ "def compute_style_cost(model, STYLE_LAYERS):\n \"\"\"\n Computes the overall style cost from several chosen layers\n \n Arguments:\n model -- our tensorflow model\n STYLE_LAYERS -- A python list containing:\n - the names of the layers we would like to extract style from\n - a coefficient for each of them\n \n Returns: \n J_style -- tensor representing a scalar value, style cost defined above by equation (2)\n \"\"\"\n \n # initialize the overall style cost\n J_style = 0\n\n for layer_name, coeff in STYLE_LAYERS:\n\n # Select the output tensor of the currently selected layer\n out = model[layer_name]\n\n # Set a_S to be the hidden layer activation from the layer we have selected, by running the session on out\n a_S = sess.run(out)\n\n # Set a_G to be the hidden layer activation from same layer. Here, a_G references model[layer_name] \n # and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that\n # when we run the session, this will be the activations drawn from the appropriate layer, with G as input.\n a_G = out\n \n # Compute style_cost for the current layer\n J_style_layer = compute_layer_style_cost(a_S, a_G)\n\n # Add coeff * J_style_layer of this layer to overall style cost\n J_style += coeff * J_style_layer\n\n return J_style", "_____no_output_____" ] ], [ [ "**Note**: In the inner-loop of the for-loop above, `a_G` is a tensor and hasn't been evaluated yet. It will be evaluated and updated at each iteration when we run the TensorFlow graph in model_nn() below.\n\n<!-- \nHow do you choose the coefficients for each layer? The deeper layers capture higher-level concepts, and the features in the deeper layers are less localized in the image relative to each other. So if you want the generated image to softly follow the style image, try choosing larger weights for deeper layers and smaller weights for the first layers. In contrast, if you want the generated image to strongly follow the style image, try choosing smaller weights for deeper layers and larger weights for the first layers\n!-->\n\n\n<font color='blue'>\n**What you should remember**:\n- The style of an image can be represented using the Gram matrix of a hidden layer's activations. However, we get even better results combining this representation from multiple different layers. This is in contrast to the content representation, where usually using just a single hidden layer is sufficient.\n- Minimizing the style cost will cause the image $G$ to follow the style of the image $S$. \n</font color='blue'>\n\n", "_____no_output_____" ], [ "### 3.3 - Defining the total cost to optimize", "_____no_output_____" ], [ "Finally, let's create a cost function that minimizes both the style and the content cost. The formula is: \n\n$$J(G) = \\alpha J_{content}(C,G) + \\beta J_{style}(S,G)$$\n\n**Exercise**: Implement the total cost function which includes both the content cost and the style cost. 
", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: total_cost\n\ndef total_cost(J_content, J_style, alpha = 10, beta = 40):\n \"\"\"\n Computes the total cost function\n \n Arguments:\n J_content -- content cost coded above\n J_style -- style cost coded above\n alpha -- hyperparameter weighting the importance of the content cost\n beta -- hyperparameter weighting the importance of the style cost\n \n Returns:\n J -- total cost as defined by the formula above.\n \"\"\"\n \n ### START CODE HERE ### (≈1 line)\n J = alpha * J_content + beta * J_style\n ### END CODE HERE ###\n \n return J", "_____no_output_____" ], [ "tf.reset_default_graph()\n\nwith tf.Session() as test:\n np.random.seed(3)\n J_content = np.random.randn() \n J_style = np.random.randn()\n J = total_cost(J_content, J_style)\n print(\"J = \" + str(J))", "J = 35.34667875478276\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **J**\n </td>\n <td>\n 35.34667875478276\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "<font color='blue'>\n**What you should remember**:\n- The total cost is a linear combination of the content cost $J_{content}(C,G)$ and the style cost $J_{style}(S,G)$\n- $\\alpha$ and $\\beta$ are hyperparameters that control the relative weighting between content and style", "_____no_output_____" ], [ "## 4 - Solving the optimization problem", "_____no_output_____" ], [ "Finally, let's put everything together to implement Neural Style Transfer!\n\n\nHere's what the program will have to do:\n<font color='purple'>\n\n1. Create an Interactive Session\n2. Load the content image \n3. Load the style image\n4. Randomly initialize the image to be generated \n5. Load the VGG16 model\n7. Build the TensorFlow graph:\n - Run the content image through the VGG16 model and compute the content cost\n - Run the style image through the VGG16 model and compute the style cost\n - Compute the total cost\n - Define the optimizer and the learning rate\n8. Initialize the TensorFlow graph and run it for a large number of iterations, updating the generated image at every step.\n\n</font>\nLets go through the individual steps in detail. ", "_____no_output_____" ], [ "You've previously implemented the overall cost $J(G)$. We'll now set up TensorFlow to optimize this with respect to $G$. To do so, your program has to reset the graph and use an \"[Interactive Session](https://www.tensorflow.org/api_docs/python/tf/InteractiveSession)\". Unlike a regular session, the \"Interactive Session\" installs itself as the default session to build a graph. This allows you to run variables without constantly needing to refer to the session object, which simplifies the code. \n\nLets start the interactive session.", "_____no_output_____" ] ], [ [ "# Reset the graph\ntf.reset_default_graph()\n\n# Start interactive session\nsess = tf.InteractiveSession()", "_____no_output_____" ] ], [ [ "Let's load, reshape, and normalize our \"content\" image (the Louvre museum picture):", "_____no_output_____" ] ], [ [ "content_image = scipy.misc.imread(\"images/louvre_small.jpg\")\ncontent_image = reshape_and_normalize_image(content_image)", "_____no_output_____" ] ], [ [ "Let's load, reshape and normalize our \"style\" image (Claude Monet's painting):", "_____no_output_____" ] ], [ [ "style_image = scipy.misc.imread(\"images/monet.jpg\")\nstyle_image = reshape_and_normalize_image(style_image)", "_____no_output_____" ] ], [ [ "Now, we initialize the \"generated\" image as a noisy image created from the content_image. 
By initializing the pixels of the generated image to be mostly noise but still slightly correlated with the content image, this will help the content of the \"generated\" image more rapidly match the content of the \"content\" image. (Feel free to look in `nst_utils.py` to see the details of `generate_noise_image(...)`; to do so, click \"File-->Open...\" at the upper-left corner of this Jupyter notebook.)", "_____no_output_____" ] ], [ [ "generated_image = generate_noise_image(content_image)\nimshow(generated_image[0])", "_____no_output_____" ] ], [ [ "Next, as explained in part (2), let's load the VGG16 model.", "_____no_output_____" ] ], [ [ "model = load_vgg_model(\"pretrained-model/imagenet-vgg-verydeep-19.mat\")", "_____no_output_____" ] ], [ [ "To get the program to compute the content cost, we will now assign `a_C` and `a_G` to be the appropriate hidden layer activations. We will use layer `conv4_2` to compute the content cost. The code below does the following:\n\n1. Assign the content image to be the input to the VGG model.\n2. Set a_C to be the tensor giving the hidden layer activation for layer \"conv4_2\".\n3. Set a_G to be the tensor giving the hidden layer activation for the same layer. \n4. Compute the content cost using a_C and a_G.", "_____no_output_____" ] ], [ [ "# Assign the content image to be the input of the VGG model. \nsess.run(model['input'].assign(content_image))\n\n# Select the output tensor of layer conv4_2\nout = model['conv4_2']\n\n# Set a_C to be the hidden layer activation from the layer we have selected\na_C = sess.run(out)\n\n# Set a_G to be the hidden layer activation from same layer. Here, a_G references model['conv4_2'] \n# and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that\n# when we run the session, this will be the activations drawn from the appropriate layer, with G as input.\na_G = out\n\n# Compute the content cost\nJ_content = compute_content_cost(a_C, a_G)", "_____no_output_____" ] ], [ [ "**Note**: At this point, a_G is a tensor and hasn't been evaluated. It will be evaluated and updated at each iteration when we run the Tensorflow graph in model_nn() below.", "_____no_output_____" ] ], [ [ "# Assign the input of the model to be the \"style\" image \nsess.run(model['input'].assign(style_image))\n\n# Compute the style cost\nJ_style = compute_style_cost(model, STYLE_LAYERS)", "_____no_output_____" ] ], [ [ "**Exercise**: Now that you have J_content and J_style, compute the total cost J by calling `total_cost()`. Use `alpha = 10` and `beta = 40`.", "_____no_output_____" ] ], [ [ "### START CODE HERE ### (1 line)\nJ = total_cost(J_content, J_style, 10, 40)\n### END CODE HERE ###", "_____no_output_____" ] ], [ [ "You'd previously learned how to set up the Adam optimizer in TensorFlow. Lets do that here, using a learning rate of 2.0. 
[See reference](https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer)", "_____no_output_____" ] ], [ [ "# define optimizer (1 line)\noptimizer = tf.train.AdamOptimizer(2.0)\n\n# define train_step (1 line)\ntrain_step = optimizer.minimize(J)", "_____no_output_____" ] ], [ [ "**Exercise**: Implement the model_nn() function which initializes the variables of the tensorflow graph, assigns the input image (initial generated image) as the input of the VGG16 model and runs the train_step for a large number of steps.", "_____no_output_____" ] ], [ [ "def model_nn(sess, input_image, num_iterations = 200):\n \n # Initialize global variables (you need to run the session on the initializer)\n ### START CODE HERE ### (1 line)\n sess.run(tf.global_variables_initializer())\n ### END CODE HERE ###\n \n # Run the noisy input image (initial generated image) through the model. Use assign().\n ### START CODE HERE ### (1 line)\n sess.run(model['input'].assign(input_image))\n ### END CODE HERE ###\n \n for i in range(num_iterations):\n \n # Run the session on the train_step to minimize the total cost\n ### START CODE HERE ### (1 line)\n _ =sess.run(train_step)\n ### END CODE HERE ###\n \n # Compute the generated image by running the session on the current model['input']\n ### START CODE HERE ### (1 line)\n generated_image = sess.run(model['input'])\n ### END CODE HERE ###\n\n # Print every 20 iteration.\n if i%20 == 0:\n Jt, Jc, Js = sess.run([J, J_content, J_style])\n print(\"Iteration \" + str(i) + \" :\")\n print(\"total cost = \" + str(Jt))\n print(\"content cost = \" + str(Jc))\n print(\"style cost = \" + str(Js))\n \n # save current generated image in the \"/output\" directory\n save_image(\"output/\" + str(i) + \".png\", generated_image)\n \n # save last generated image\n save_image('output/generated_image.jpg', generated_image)\n \n return generated_image", "_____no_output_____" ] ], [ [ "Run the following cell to generate an artistic image. It should take about 3min on CPU for every 20 iterations but you start observing attractive results after ≈140 iterations. Neural Style Transfer is generally trained using GPUs.", "_____no_output_____" ] ], [ [ "model_nn(sess, generated_image)", "Iteration 0 :\ntotal cost = 8.75487e+09\ncontent cost = 7877.36\nstyle cost = 2.1887e+08\nIteration 20 :\ntotal cost = 1.63786e+09\ncontent cost = 15179.7\nstyle cost = 4.09426e+07\nIteration 40 :\ntotal cost = 8.4279e+08\ncontent cost = 16778.1\nstyle cost = 2.10656e+07\nIteration 60 :\ntotal cost = 5.4373e+08\ncontent cost = 17466.7\nstyle cost = 1.35889e+07\nIteration 80 :\ntotal cost = 3.97197e+08\ncontent cost = 17723.8\nstyle cost = 9.92549e+06\nIteration 100 :\ntotal cost = 3.14456e+08\ncontent cost = 17913.0\nstyle cost = 7.85691e+06\nIteration 120 :\ntotal cost = 2.60942e+08\ncontent cost = 18065.0\nstyle cost = 6.51903e+06\nIteration 140 :\ntotal cost = 2.22287e+08\ncontent cost = 18221.0\nstyle cost = 5.55262e+06\nIteration 160 :\ntotal cost = 1.92714e+08\ncontent cost = 18384.1\nstyle cost = 4.81325e+06\nIteration 180 :\ntotal cost = 1.69396e+08\ncontent cost = 18533.7\nstyle cost = 4.23026e+06\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **Iteration 0 : **\n </td>\n <td>\n total cost = 5.05035e+09 <br>\n content cost = 7877.67 <br>\n style cost = 1.26257e+08\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "You're done! After running this, in the upper bar of the notebook click on \"File\" and then \"Open\". Go to the \"/output\" directory to see all the saved images. 
Open \"generated_image\" to see the generated image! :)\n\nYou should see something the image presented below on the right:\n\n<img src=\"images/louvre_generated.png\" style=\"width:800px;height:300px;\">\n\nWe didn't want you to wait too long to see an initial result, and so had set the hyperparameters accordingly. To get the best looking results, running the optimization algorithm longer (and perhaps with a smaller learning rate) might work better. After completing and submitting this assignment, we encourage you to come back and play more with this notebook, and see if you can generate even better looking images. ", "_____no_output_____" ], [ "Here are few other examples:\n\n- The beautiful ruins of the ancient city of Persepolis (Iran) with the style of Van Gogh (The Starry Night)\n<img src=\"images/perspolis_vangogh.png\" style=\"width:750px;height:300px;\">\n\n- The tomb of Cyrus the great in Pasargadae with the style of a Ceramic Kashi from Ispahan.\n<img src=\"images/pasargad_kashi.png\" style=\"width:750px;height:300px;\">\n\n- A scientific study of a turbulent fluid with the style of a abstract blue fluid painting.\n<img src=\"images/circle_abstract.png\" style=\"width:750px;height:300px;\">", "_____no_output_____" ], [ "## 5 - Test with your own image (Optional/Ungraded)", "_____no_output_____" ], [ "Finally, you can also rerun the algorithm on your own images! \n\nTo do so, go back to part 4 and change the content image and style image with your own pictures. In detail, here's what you should do:\n\n1. Click on \"File -> Open\" in the upper tab of the notebook\n2. Go to \"/images\" and upload your images (requirement: (WIDTH = 300, HEIGHT = 225)), rename them \"my_content.png\" and \"my_style.png\" for example.\n3. Change the code in part (3.4) from :\n```python\ncontent_image = scipy.misc.imread(\"images/louvre.jpg\")\nstyle_image = scipy.misc.imread(\"images/claude-monet.jpg\")\n```\nto:\n```python\ncontent_image = scipy.misc.imread(\"images/my_content.jpg\")\nstyle_image = scipy.misc.imread(\"images/my_style.jpg\")\n```\n4. Rerun the cells (you may need to restart the Kernel in the upper tab of the notebook).\n\nYou can also tune your hyperparameters: \n- Which layers are responsible for representing the style? STYLE_LAYERS\n- How many iterations do you want to run the algorithm? num_iterations\n- What is the relative weighting between content and style? alpha/beta", "_____no_output_____" ], [ "## 6 - Conclusion\n\nGreat job on completing this assignment! You are now able to use Neural Style Transfer to generate artistic images. This is also your first time building a model in which the optimization algorithm updates the pixel values rather than the neural network's parameters. Deep learning has many different types of models and this is only one of them! \n\n<font color='blue'>\nWhat you should remember:\n- Neural Style Transfer is an algorithm that given a content image C and a style image S can generate an artistic image\n- It uses representations (hidden layer activations) based on a pretrained ConvNet. \n- The content cost function is computed using one hidden layer's activations.\n- The style cost function for one layer is computed using the Gram matrix of that layer's activations. The overall style cost function is obtained using several hidden layers.\n- Optimizing the total cost function results in synthesizing new images. \n\n\n", "_____no_output_____" ], [ "This was the final programming exercise of this course. 
Congratulations--you've finished all the programming exercises of this course on Convolutional Networks! We hope to also see you in Course 5, on Sequence models! \n", "_____no_output_____" ], [ "### References:\n\nThe Neural Style Transfer algorithm was due to Gatys et al. (2015). Harish Narayanan and Github user \"log0\" also have highly readable write-ups from which we drew inspiration. The pre-trained network used in this implementation is a VGG network, which is due to Simonyan and Zisserman (2015). Pre-trained weights were from the work of the MathConvNet team. \n\n- Leon A. Gatys, Alexander S. Ecker, Matthias Bethge, (2015). A Neural Algorithm of Artistic Style (https://arxiv.org/abs/1508.06576) \n- Harish Narayanan, Convolutional neural networks for artistic style transfer. https://harishnarayanan.org/writing/artistic-style-transfer/\n- Log0, TensorFlow Implementation of \"A Neural Algorithm of Artistic Style\". http://www.chioka.in/tensorflow-implementation-neural-algorithm-of-artistic-style\n- Karen Simonyan and Andrew Zisserman (2015). Very deep convolutional networks for large-scale image recognition (https://arxiv.org/pdf/1409.1556.pdf)\n- MatConvNet. http://www.vlfeat.org/matconvnet/pretrained/\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
4a7d9e8ab4092be1cb7879845659fcdf8f2baef3
5,105
ipynb
Jupyter Notebook
Cap02/P-2_113.ipynb
IsraelBOrrico/montgomery_resolucao_python
eef4f7fb427c2dfd7ba0eb1fc3f9bf3c39a2de9f
[ "MIT" ]
null
null
null
Cap02/P-2_113.ipynb
IsraelBOrrico/montgomery_resolucao_python
eef4f7fb427c2dfd7ba0eb1fc3f9bf3c39a2de9f
[ "MIT" ]
null
null
null
Cap02/P-2_113.ipynb
IsraelBOrrico/montgomery_resolucao_python
eef4f7fb427c2dfd7ba0eb1fc3f9bf3c39a2de9f
[ "MIT" ]
null
null
null
25.525
151
0.470323
[ [ [ "# Problem 113 from chapter 02\n***", "_____no_output_____" ] ], [ [ "from pandas import DataFrame", "_____no_output_____" ], [ "reactions = DataFrame({'Below_Target': [12, 44, 56], 'Above_Target': [40, 16, 36]}, index = ['266K', '271K', '274K'])\nreactions.index.name = 'Final_Temperature_Conditions'\nreactions.columns.name = 'Heat_Absorbed(cal)'\nreactions", "_____no_output_____" ] ], [ [ "### Let A denote the event that a reaction's final temperature is 271K or less and let B denote the event that the heat absorbed is above target.", "_____no_output_____" ], [ "### Item a. P(A|B)", "_____no_output_____" ] ], [ [ "P_AgivenB = reactions.loc['266K':'271K', :].sum().Above_Target / reactions.sum().Above_Target\nprint(f'The probability that the reaction temperature is 271K or less given that its above target is {P_AgivenB:.2f}')", "The probability that the reaction temperature is 271K or less given that its above target is 0.61\n" ] ], [ [ "### Item b. P(A'|B)", "_____no_output_____" ] ], [ [ "P_NAgivenB = reactions.loc['274K', 'Above_Target'] / reactions.sum().Above_Target\nprint(f'The probability that the reaction temperature is above 274K given that its above target is {P_NAgivenB:.2f}')", "The probability that the reaction temperature is above 274K given that its above target is 0.39\n" ] ], [ [ "### Item c. P(A|B')", "_____no_output_____" ] ], [ [ "P_AgivenNB = reactions.loc['266K':'271K', 'Below_Target'].sum() / reactions.sum().Below_Target\nprint(f'The probability that the reaction temperature is 271K or less given that its below target is {P_AgivenNB:.2f}')", "The probability that the reaction temperature is 271K or less given that its below target is 0.50\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a7da84d82c106fd986e90055685972df76bdf1c
1,543
ipynb
Jupyter Notebook
dev_nb.ipynb
ArsalanRiaz/dev_playground
b06ca10e592e1871a519f4e6aeadba040642dee2
[ "Apache-2.0" ]
null
null
null
dev_nb.ipynb
ArsalanRiaz/dev_playground
b06ca10e592e1871a519f4e6aeadba040642dee2
[ "Apache-2.0" ]
null
null
null
dev_nb.ipynb
ArsalanRiaz/dev_playground
b06ca10e592e1871a519f4e6aeadba040642dee2
[ "Apache-2.0" ]
null
null
null
19.782051
217
0.526896
[ [ [ "# dev_nb\n\n> The purpose of this notebook is to provide a scaffold upon which we will investigate the behavior of resolving merge conflicts in Jupyter notebooks. Once we're used to these concepts, we'll explore branching.", "_____no_output_____" ] ], [ [ "# This function multiplies values by two\ndef mult2(val):\n '''Returns double of the supplied input parameter val'''\n return 2 * val", "_____no_output_____" ], [ "# Test the function\nassert mult2(2) == 4", "_____no_output_____" ], [ "# Demo the function\nmult2(4)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ] ]