| Column | Type | Values / Range |
|---|---|---|
| hexsha | stringlengths | 40-40 |
| size | int64 | 6-14.9M |
| ext | stringclasses | 1 value |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 6-260 |
| max_stars_repo_name | stringlengths | 6-119 |
| max_stars_repo_head_hexsha | stringlengths | 40-41 |
| max_stars_repo_licenses | list | |
| max_stars_count | int64 | 1-191k |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24-24 |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24-24 |
| max_issues_repo_path | stringlengths | 6-260 |
| max_issues_repo_name | stringlengths | 6-119 |
| max_issues_repo_head_hexsha | stringlengths | 40-41 |
| max_issues_repo_licenses | list | |
| max_issues_count | int64 | 1-67k |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24-24 |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24-24 |
| max_forks_repo_path | stringlengths | 6-260 |
| max_forks_repo_name | stringlengths | 6-119 |
| max_forks_repo_head_hexsha | stringlengths | 40-41 |
| max_forks_repo_licenses | list | |
| max_forks_count | int64 | 1-105k |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24-24 |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24-24 |
| avg_line_length | float64 | 2-1.04M |
| max_line_length | int64 | 2-11.2M |
| alphanum_fraction | float64 | 0-1 |
| cells | list | |
| cell_types | list | |
| cell_type_groups | list | |
| Field | Value |
|---|---|
| hexsha | c52c457c2fe5955a5a502f11659d6509a5c2dc28 |
| size | 425,179 |
| ext | ipynb |
| lang | Jupyter Notebook |
| max_stars_repo_path | Cani_Index_Info.ipynb |
| max_stars_repo_name | TMQR/Cannabis_Index_Research |
| max_stars_repo_head_hexsha | 50e385ba4d4873c15e5de97daed3c208be85992f |
| max_stars_repo_licenses | [ "MIT" ] |
| max_stars_count | 1 |
| max_stars_repo_stars_event_min_datetime | 2019-10-26T22:08:43.000Z |
| max_stars_repo_stars_event_max_datetime | 2019-10-26T22:08:43.000Z |
| max_issues_repo_path | Cani_Index_Info.ipynb |
| max_issues_repo_name | TMQR/Cannabis_Index_Research |
| max_issues_repo_head_hexsha | 50e385ba4d4873c15e5de97daed3c208be85992f |
| max_issues_repo_licenses | [ "MIT" ] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | Cani_Index_Info.ipynb |
| max_forks_repo_name | TMQR/Cannabis_Index_Research |
| max_forks_repo_head_hexsha | 50e385ba4d4873c15e5de97daed3c208be85992f |
| max_forks_repo_licenses | [ "MIT" ] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |
| avg_line_length | 438.328866 |
| max_line_length | 90,872 |
| alphanum_fraction | 0.932624 |
[ [ [ "Based On the Canadian Marijuana Index these are the primary players in the Canadian Market.", "_____no_output_____" ] ], [ [ "from pandas_datareader import data as pdr\nimport fix_yahoo_finance as fyf\nimport matplotlib.pyplot as plt\nimport datetime\nimport numpy as np\nimport pandas as pd\n\nimport scipy\n# import statsmodels.api as sm\n\n\nfrom sklearn import mixture as mix\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC\n\n\nimport bt\nimport ffn\nimport jhtalib as jhta\nimport datetime\n\n# import matplotlib as plt\nimport seaborn as sns\nsns.set()\n\n\nfyf.pdr_override()\n# If want Futures data call Quandl\n\n\n# # Dates\nstart = datetime.datetime(2005, 1, 1)\nend = datetime.datetime(2019, 1, 27)", "_____no_output_____" ], [ "\npd.core.common.is_list_like = pd.api.types.is_list_like\n# import pandas_datareader as pdr\n%pylab\n# params = {'legend.fontsize': 'x-large',\n# 'figure.figsize': (15, 5),\n# 'axes.labelsize': 'x-large',\n# 'axes.titlesize':'x-large',\n# 'xtick.labelsize':'x-large',\n# 'ytick.labelsize':'x-large'}\n# pylab.rcParams.update(params)\n\n%matplotlib inline", "Using matplotlib backend: TkAgg\nPopulating the interactive namespace from numpy and matplotlib\n" ], [ "# stocks = ['WEED.TO','ACB.TO','TLRY',\n# 'CRON.TO', 'HEXO.TO', 'TRST.TO',\n# 'OGI.V', 'TGOD.TO', 'RIV.V', 'TER.CN',\n# 'XLY.V', 'FIRE.V', 'EMH.V',\n# 'N.V', 'VIVO.V', 'WAYL.CN',\n# 'HIP.V', 'APHA.TO', 'SNN.CN', 'ISOL.CN']", "_____no_output_____" ], [ "# Maijuana_Index = pdr.get_data_yahoo(stocks,start= start)\n# # ", "_____no_output_____" ], [ "# (Maijuana_Index['Adj Close']['2017':]).plot(figsize=(15,9), title='Canadain Cannibis Index Components')", "_____no_output_____" ], [ "# Maijuana_Index = Maijuana_Index['Adj Close']\n\n# Maijuana_Index = ffn.rebase(Maijuana_Index)", "_____no_output_____" ], [ "# Real_Es_Sector = ['IIPR', 'MJNE', 'PRRE', \n# 'GRWC', 'ZDPY', 'TURV', \n# 'AGTK', 'DPWW', 'CGRA', \n# 'DPWW', 'FUTL', 'FTPM']\n\nReal_Es_Sector = ['PRRE']", "_____no_output_____" ], [ "# list(dict.values(client.get_ticker_metadata(('HYYDF'))))", "_____no_output_____" ], [ "# # def get_names(list):\n# try:\n# for i in range(len(Real_Es_Sector)):\n# # df = pd.DataFrame(len(Real_Es_Sector))\n \n# x = print(list(dict.values(client.get_ticker_metadata((Real_Es_Sector[i]))))[5])\n# except:\n# pass\n \n# # print(list(dict.values(client.get_ticker_metadata((Real_Es_Sector[i]))))[5])\n \n \n \n ", "_____no_output_____" ], [ "# Canada_Index_Names = ['Canaopy Growth Corporation', 'Aurora Canabis Inc.',\n# 'Tilray Inc.', 'Cronos Group Inc.', 'Aphria Inc', 'HEXO Corp.'\n# 'CannTrust Holdings Inc.', 'OrganiGram Holdings Inc', \n# 'The Green Organic Dutchman','Canopy Rivers Inc.', \n# 'TerrAscend Corp.', 'Auxly Cannabis Group Inc.',\n# 'The Supreme Cannabis Company Inc.','Emerald Health Therapeutics Inc.',\n# 'Namaste Technologies Inc.', 'Vivo Cannabis Inc.','Newstrike Brands Ltd',\n# 'Wayland Group','Sunniva Inc - Ordinary Shares', \n# 'Isodiol International Inc.']", "_____no_output_____" ], [ "# list(dict.values(client.get_ticker_metadata(('CGRA'))))", "_____no_output_____" ], [ "# %%html\n# <iframe src=\"https://www.bloomberg.com/quote/HEXO:CN\" width=\"1400\" height=\"1300\"></iframe>", "_____no_output_____" ] ], [ [ "###### source : http://marijuanaindex.com/stock-quotes/canadian-marijuana-index/\n###### source : https://www.bloomberg.com/quote/WEED:CN", "_____no_output_____" ], [ "## 1. 
Canopy Growth Corporation : ticker WEED\n### - Mkt cap $13,126,000,000 CAD as of January 2019\n\n##### Canopy Growth Corporation, through its subsidiaries, is a producer of medical marijuana. The Company's group of brands represents distinct voices and market positions designed to appeal to an array of customers, doctors and strategic industry partners.\n\nBloomberg Description:\nhttps://www.bloomberg.com/quote/WEED:CN\n\nCEO: Bruce Linton\nhttps://www.linkedin.com/in/bruce-linton-152137/\n\nCFO: Tim Saunders\nhttps://www.linkedin.com/in/tsaunders/\n", "_____no_output_____" ] ], [ [ "WEED = pdr.get_data_yahoo('WEED.TO',start= start)", "[*********************100%***********************] 1 of 1 downloaded\n" ] ], [ [ "# Canopy Growth Corporation\n### Price Action", "_____no_output_____" ] ], [ [ "WEED['Adj Close']['2017':].plot(figsize=(15,9), title='Canopy Growth Corporation', fontsize=25)", "_____no_output_____" ] ], [ [ "## 2. Aurora Cannabis Inc. : ticker ACB.TO\n\n### - Mkt cap $6,970,000,000 CAD as of January 2019\n\n##### Overview \nsource :https://www.linkedin.com/company/aurora-cannabis-inc-/about/\n\nAurora Cannabis Inc. is a world-renowned integrated cannabis company with an industry-leading reputation for continuously elevating and setting the global cannabis industry standard.\n\nThrough our wholly owned subsidiaries, strategic investments, and global partnerships, Aurora provides a wide range of premium quality cannabis and hemp products and services, develops innovative technologies, promotes cannabis consumer health and wellness, and delivers an exceptional customer experience across all its brands.\n\nAurora’s operations span multiple continents and focuses on both the medical and recreational cannabis production and sales, patient education and clinic counselling services, home hydroponic cultivation, extraction technologies and delivery systems, and hemp-based food health products.\n\nWe operate around the globe pursuing new and emerging cannabis markets where possible through our owned network of import, export and wholesale distributors, and our e-commerce and mobile applications.\n\nBloomberg Description:\nhttps://www.bloomberg.com/quote/ACB:CN\n\nCEO: Terry Booth\nhttps://www.linkedin.com/in/terry-booth-681806131/\n\nCFO: Glen Ibbott\nhttps://www.linkedin.com/in/glenibbott/\n", "_____no_output_____" ] ], [ [ "ACB = pdr.get_data_yahoo('ACB.TO',start= start)", "[*********************100%***********************] 1 of 1 downloaded\n" ] ], [ [ "# Aurora Cannabis Inc\n### Price Action", "_____no_output_____" ] ], [ [ "ACB['Adj Close']['2017':].plot(figsize=(15,9), title='Aurora Cannabis Inc', fontsize=25)", "_____no_output_____" ] ], [ [ "## 3. Tilray Inc. : ticker TLRY\n\n### - Mkt cap $6,699,000,000 CAD as of January 2019\n\n##### Overview \nsource :https://www.linkedin.com/company/tilray/about/\n\nTilray is a global leader in medical cannabis research and production dedicated to providing safe, consistent and reliable therapy to patients. 
We are the only GMP certified medical cannabis producer currently supplying products to thousands of patients, physicians, pharmacies, hospitals, governments and researchers in Australia, Canada, the European Union and the Americas.\n\nBloomberg Description:\nhttps://www.bloomberg.com/quote/TLRY:US\n\nCEO: Brendan Kennedy\nhttps://www.linkedin.com/in/kennedybrendan/\n\nCFO: Mark Castaneda\nhttps://www.linkedin.com/in/mark-castaneda-ba8315/\n", "_____no_output_____" ] ], [ [ "TLRY = pdr.get_data_yahoo('TLRY',start= start)", "[*********************100%***********************] 1 of 1 downloaded\n" ] ], [ [ "# Tilray Inc\n### Price Action", "_____no_output_____" ] ], [ [ "TLRY['Adj Close']['2017':].plot(figsize=(15,9), title='Tilray Inc', fontsize=25)", "_____no_output_____" ], [ "top_three = pd.DataFrame(bt.merge(WEED['Adj Close'], ACB['Adj Close'], TLRY['Adj Close']))\n\ntop_three.columns = [['Canopy Growth Corporation', ' Aurora Cannabis Inc.', 'Tilray Inc.']]\n\ntop_three = top_three.dropna()\nffn.rebase(top_three).plot(figsize=(15,9), title='Top Cannibis Firms % Returns', fontsize=25)\n", "_____no_output_____" ] ], [ [ "## 4. Cronos Group Inc. : ticker CRON.TO\n\n### - Mkt cap $2,950,000,000 CAD as of January 2019\n\n##### Overview \nsource :https://www.linkedin.com/company/cronos-group-mjn-/about/\n\nCronos Group is a globally diversified and vertically integrated cannabis company with a presence across four continents. The Company operates two wholly-owned Canadian Licensed Producers regulated under Health Canada’s Access to Cannabis for Medical Purposes Regulations: Peace Naturals Project Inc. (Ontario), which was the first non-incumbent medical cannabis license granted by Health Canada, and Original BC Ltd. (British Columbia), which is based in the Okanagan Valley. The Company has multiple international production and distribution platforms: Cronos Israel and Cronos Australia. Through an exclusive distribution agreement, Cronos also has access to over 12,000 pharmacies in Germany as the Company focuses on building an international iconic brand portfolio and developing disruptive intellectual property.\n\nBloomberg Description:\nhttps://www.bloomberg.com/quote/CRON:CN\n\n\nCEO: Michael Gorenstein\nhttps://www.linkedin.com/in/michaelgorenstein/\n\nCFO: Nauman Siddiqui\nhttps://www.linkedin.com/in/nauman-siddiqui-cpa-cma-bba-b068aa32/", "_____no_output_____" ] ], [ [ "CRON = pdr.get_data_yahoo('CRON.TO',start= start)", "[*********************100%***********************] 1 of 1 downloaded\n" ] ], [ [ "# Cronos Group Inc\n### Price Action", "_____no_output_____" ] ], [ [ "CRON['Adj Close']['2017':].plot(figsize=(15,9), title='Cronos Group Inc', fontsize=25)", "_____no_output_____" ] ], [ [ "## 5. HEXO Corp : ticker HEXO.TO\n\n### - Mkt cap $1,260,000,000 CAD as of January 2019\n\n##### Overview \nsource : https://www.linkedin.com/company/hexo-corp/about/\n\nHEXO Corp is one of Canada's lowest cost producers of easy-to-use and easy-to-understand products to serve the Canadian medical and adult-use cannabis markets. 
HEXO Corp's brands include Hydropothecary, an award-winning medical cannabis brand and HEXO, for the adult-use market.\n\nBloomberg Description:\nhttps://www.bloomberg.com/quote/HEXO:CN\n\nCEO: Adam Miron \nhttps://www.linkedin.com/in/adammiron/\n\nCFO: Ed Chaplin \nhttps://www.linkedin.com/in/echaplin/", "_____no_output_____" ] ], [ [ "HEXO = pdr.get_data_yahoo('HEXO.TO',start= start)", "[*********************100%***********************] 1 of 1 downloaded\n" ] ], [ [ "# HEXO Inc\n### Price Action", "_____no_output_____" ] ], [ [ "HEXO['Adj Close']['2017':].plot(figsize=(15,9), title='HEXO Corp', fontsize=25)", "_____no_output_____" ], [ "bottom_30 = pd.read_csv('./bottom30.csv')", "_____no_output_____" ] ], [ [ "# Summary Information on the bottom 30% of the Canadian Marijuana Index", "_____no_output_____" ] ], [ [ "bottom_30.fillna('-')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
| Field | Value |
|---|---|
| hexsha | c52c6678d1915e2cf3bbf9d35b71f2969df89f15 |
| size | 36,631 |
| ext | ipynb |
| lang | Jupyter Notebook |
| max_stars_repo_path | ipynb/Beal.ipynb |
| max_stars_repo_name | awesome-archive/pytudes |
| max_stars_repo_head_hexsha | 650b64134f9f38787b2775abcf8e915f0e2d9da1 |
| max_stars_repo_licenses | [ "MIT" ] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | ipynb/Beal.ipynb |
| max_issues_repo_name | awesome-archive/pytudes |
| max_issues_repo_head_hexsha | 650b64134f9f38787b2775abcf8e915f0e2d9da1 |
| max_issues_repo_licenses | [ "MIT" ] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | ipynb/Beal.ipynb |
| max_forks_repo_name | awesome-archive/pytudes |
| max_forks_repo_head_hexsha | 650b64134f9f38787b2775abcf8e915f0e2d9da1 |
| max_forks_repo_licenses | [ "MIT" ] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |
| avg_line_length | 34.202614 |
| max_line_length | 901 |
| alphanum_fraction | 0.535366 |
[ [ [ "<div style=\"text-align:right\"><b>Peter Norvig</b> 22 October 2015, revised 28 October 2015, 4 July 2017</div>\n\n# Beal's Conjecture Revisited\n\nIn 1637, Pierre de Fermat wrote in the margin of a book that he had a proof of his famous \"[Last Theorem](https://en.wikipedia.org/wiki/Fermat%27s_Last_Theorem)\":\n\n> If $A^n + B^n = C^n$,\n> <br>where $A, B, C, n$ are positive integers\n> <br>then $n \\le 2$.\n\nCenturies passed before [Andrew Beal](https://en.wikipedia.org/wiki/Andrew_Beal), a businessman and amateur mathematician,\nmade his conjecture in 1993:\n\n> If $A^x + B^y = C^z$, \n> <br>where $A, B, C, x, y, z$ are positive integers and $x, y, z$ are all greater than $2$, \n> <br>then $A, B$ and $C$ must have a common prime factor.\n\n[Andrew Wiles](https://en.wikipedia.org/wiki/Andrew_Wiles) proved Fermat's theorem in 1995, but Beal's conjecture remains unproved, and Beal has offered [\\$1,000,000](http://abcnews.go.com/blogs/headlines/2013/06/billionaire-offers-1-million-to-solve-math-problem/) for a proof or disproof. I don't have the mathematical skills of Wiles, so I could never find a proof, but I can write a program to search for counterexamples. I first wrote [that program in 2000](http://norvig.com/beal2000.html), and [my name got associated](https://www.google.com/webhp?sourceid=chrome-instant&ion=1&espv=2&ie=UTF-8#q=beal%20conjecture) with Beal's Conjecture, which means I get a lot of emails with purported proofs or counterexamples (many asking how they can collect their prize money). So far, all the emails have been wrong. This page catalogs some of the more common errors and updates my 2000 program.", "_____no_output_____" ], [ "# How to Not Win A Million Dollars\n\n\n* A proof must show that there are no examples that satisfy the conditions. A common error is to show how a certain pattern generates an infinite collection of numbers that satisfy $A^x + B^y = C^z$ and then show that in all of these, $A, B, C$ have a common factor. But that's not good enough, unless you can also prove that no other pattern exists.\n\n<p>\n\n* It is valid to use proof by contradiction: assume the conjecture is true, and show that that leads to a contradiction. It is not valid to use proof by circular reasoning: assume the conjecture is true, put in some irrelevant steps, and show that it follows that the conjecture is true.\n\n\n<p>\n\n* A valid counterexample needs to satisfy all four conditions&mdash;don't leave one out:\n\n> $A, B, C, x, y, z$ are positive integers <br> \n$x, y, z > 2$ <br>\n$A^x + B^y = C^z$ <br>\n$A, B, C$ have no common prime factor.\n\n(If you think you might have a valid counterexample, before you share it with Andrew Beal or anyone else, you can check it with my [Online Beal Counterexample Checker](http://norvig.com/bealcheck.html).)\n\n<p>\n\n* One correspondent claimed that $27^4 + 162 ^ 3 = 9 ^ 7$ was a solution, because the first three conditions hold, and the common factor is 9, which isn't a prime. But of course, if $A, B, C$ have 9 as a common factor, then they also have 3, and 3 is prime. The phrase \"no common prime factor\" means the same thing as \"no common factor greater than 1.\"\n\n<p>\n\n* Another claimed that $2^3+2^3=2^4$ was a counterexample, because all the bases are 2, which is prime, and prime numbers have no prime factors. 
But that's not true; a prime number has itself as a factor.\n\n<p>\n\n* A creative person offered $1359072^4 - 940896^4 = 137998080^3$, which fails both because $3^3 2^5 11^2$ is a common factor, and because it has a subtraction rather than an addition (although, as Julius Jacobsen pointed out, that can be rectified by adding $940896^4$ to both sides).\n\n<p>\n\n* Mustafa Pehlivan came up with an example involving 76-million-digit numbers, which took some work to prove wrong (by using modulo arithmetic). \n\n<p>\n\n* Another Beal fan started by saying \"Let $C = 43$ and $z = 3$. Since $43 = 21 + 22$, we have $43^3 = (21^3 + 22^3).$\" But of course $(a + b)^3 \\ne (a^3 + b^3)$. This fallacy is called [the freshman's dream](https://en.wikipedia.org/wiki/Freshman%27s_dream) (although I remember having different dreams as a freshman).\n\n<p>\n\n* Multiple people proposed answers similar to this one:", "_____no_output_____" ] ], [ [ "from math import gcd #### In Python versions < 3.5, use \"from fractions import gcd\"", "_____no_output_____" ], [ "A, B, C = 60000000000000000000, 70000000000000000000, 82376613842809255677\n\nx = y = z = 3.\n\nA ** x + B ** y == C ** z and gcd(gcd(A, B), C) == 1", "_____no_output_____" ] ], [ [ "**WOW! The result is `True`!** Is this a real counterexample to Beal? And also a disproof of Fermat?\n\nAlas, it is not. Notice the decimal point in \"`3.`\", indicating a floating point number, with inexact, limited precision. Change the inexact \"`3.`\" to an exact \"`3`\" and the result changes to \"`False`\". Below we see that the two sides of the equation are the same for the first 18 digits, but differ starting with the 19th: ", "_____no_output_____" ] ], [ [ "(A ** 3 + B ** 3,\n C ** 3)", "_____no_output_____" ] ], [ [ "They say \"close\" only counts in horseshoes and hand grenades, and if you threw two horseshoes at a stake on the planet [Kapteyn-b](https://en.wikipedia.org/wiki/Kapteyn_b) (a possibly habitable and thus possibly horseshoe-playing exoplanet 12.8 light years from Earth) and the two paths differed in the 19th digit, the horseshoes would end up [less than an inch](https://www.google.com/webhp?sourceid=chrome-instant&ion=1&espv=2&ie=UTF-8#q=12.8%20light%20years%20*%201e-19%20in%20inches) apart. That's really, really close, but close doesn't count in number theory.\n\n\n# *The Simpsons* and Fermat\n\nIn two different [episodes of *The Simpsons*](http://www.npr.org/sections/krulwich/2014/05/08/310818693/did-homer-simpson-actually-solve-fermat-s-last-theorem-take-a-look), close counterexamples to Fermat's Last Theorem are shown: \n$1782^{12} + 1841^{12} = 1922^{12}$ and $3987^{12} + 4365^{12} = 4472^{12}$. These were designed by *Simpsons* writer David X. Cohen to be correct up to the precision found in most handheld calculators. Cohen found the equations with a program that must have been something like this:", "_____no_output_____" ] ], [ [ "from itertools import combinations\n\ndef simpsons(bases, powers):\n \"\"\"Find the integers (A, B, C, n) that come closest to solving \n Fermat's equation, A ** n + B ** n == C ** n. 
\n Let A, B range over all pairs of bases and n over all powers.\"\"\"\n equations = ((A, B, iroot(A ** n + B ** n, n), n)\n for A, B in combinations(bases, 2)\n for n in powers)\n return min(equations, key=relative_error)\n\ndef iroot(i, n): \n \"The integer closest to the nth root of i.\"\n return int(round(i ** (1./n)))\n\ndef relative_error(equation):\n \"Error between LHS and RHS of equation, relative to RHS.\" \n (A, B, C, n) = equation\n LHS = A ** n + B ** n\n RHS = C ** n\n return abs(LHS - RHS) / RHS", "_____no_output_____" ], [ "simpsons(range(1000, 2000), [11, 12, 13])", "_____no_output_____" ], [ "simpsons(range(3000, 5000), [12])", "_____no_output_____" ] ], [ [ "# Back to Beal\n\nIn October 2015 I looked back at my original [program from 2000](http://norvig.com/beal2000.html).\nI ported it from Python 1.5 to 3.5 (by putting parens around the argument to `print` and adding `long = int`). The program runs 250 times faster today than it did in 2000, a tribute to both computer hardware engineers and the developers of the Python interpreter.\n\nI found that I was a bit confused about the definition of [the problem in 2000](https://web.archive.org/web/19991127081319/http://bealconjecture.com/). I thought then that, *by definition*, $A$ and $B$ could not have a common factor, but actually, the definition of the conjecture only rules out examples where all three of $A, B, C$ share a common factor. Mark Tiefenbruck (and later Edward P. Berlin and Shen Lixing) wrote to point out that my thought was actually correct, not by definition, but by derivation: if $A$ and $B$ have a commmon prime factor $p$, then the sum of $A^x + B^y$ must also have that factor $p$, and since $A^x + B^y = C^z$, then $C^z$ and hence $C$ must have the factor $p$. So I was wrong twice, and in this case two wrongs did make a right.\n\nMark Tiefenbruck also suggested an optimization: only consider exponents that are odd primes, or 4. The idea is that a number like 512 can be expressed as either $2^9$ or $8^3$, and my program doesn't need to consider both. In general, any time we have a composite exponent, such as $b^{qp}$, where $p$ is prime, we should ignore $b^{(qp)}$, and instead consider only $(b^q)^p$. There's one complication to this scheme: 2 is a prime, but 2 is not a valid exponent for a Beal counterexample. 
So we will allow 4 as an exponent, as well as all odd primes up to `max_x`.\n\nHere is the complete, updated, refactored, optimized program:", "_____no_output_____" ] ], [ [ "from math import gcd, log\nfrom itertools import combinations, product\n\ndef beal(max_A, max_x):\n \"\"\"See if any A ** x + B ** y equals some C ** z, with gcd(A, B) == 1.\n Consider any 1 <= A,B <= max_A and x,y <= max_x, with x,y prime or 4.\"\"\"\n Apowers = make_Apowers(max_A, max_x)\n Czroots = make_Czroots(Apowers)\n for (A, B) in combinations(Apowers, 2):\n if gcd(A, B) == 1:\n for (Ax, By) in product(Apowers[A], Apowers[B]): \n Cz = Ax + By\n if Cz in Czroots:\n C = Czroots[Cz]\n x, y, z = exponent(Ax, A), exponent(By, B), exponent(Cz, C)\n print('{} ** {} + {} ** {} == {} ** {} == {}'\n .format(A, x, B, y, C, z, C ** z))\n\ndef make_Apowers(max_A, max_x): \n \"A dict of {A: [A**3, A**4, ...], ...}.\"\n exponents = exponents_upto(max_x)\n return {A: [A ** x for x in (exponents if (A != 1) else [3])]\n for A in range(1, max_A+1)}\n\ndef make_Czroots(Apowers): return {Cz: C for C in Apowers for Cz in Apowers[C]} \n \ndef exponents_upto(max_x):\n \"Return all odd primes up to max_x, as well as 4.\"\n exponents = [3, 4] if max_x >= 4 else [3] if max_x == 3 else []\n for x in range(5, max_x, 2):\n if not any(x % p == 0 for p in exponents):\n exponents.append(x)\n return exponents\n\ndef exponent(Cz, C): \n \"\"\"Recover z such that C ** z == Cz (or equivalently z = log Cz base C).\n For exponent(1, 1), arbitrarily choose to return 3.\"\"\"\n return 3 if (Cz == C == 1) else int(round(log(Cz, C)))", "_____no_output_____" ] ], [ [ "It takes less than a second to verify that there are no counterexamples for combinations up to $100^{100}$, a computation that took Andrew Beal thousands of hours on his 1990s-era computers:", "_____no_output_____" ] ], [ [ "%time beal(100, 100)", "CPU times: user 256 ms, sys: 1.44 ms, total: 257 ms\nWall time: 256 ms\n" ] ], [ [ "The execution time goes up roughly with the square of `max_A`, so the following should take about 100 times longer:", "_____no_output_____" ] ], [ [ "%time beal(1000, 100)", "CPU times: user 29.1 s, sys: 25.2 ms, total: 29.2 s\nWall time: 29.2 s\n" ] ], [ [ "# How `beal` Works\n\nThe function `beal` first does some precomputation, creating two data structures:\n* `Apowers`: a dict of the form `{A: [A**3, A**4, ...]}` giving the\nnonredundant powers (prime and 4th powers) of each base, `A`, from 3 to `max_x`.\n* `Czroots`: a dict of `{C**z : C}` pairs, giving the zth root of each power in `Apowers`.\n\nHere is a very small example Apowers table:", "_____no_output_____" ] ], [ [ "Apowers = make_Apowers(6, 10)\nApowers", "_____no_output_____" ] ], [ [ "Then we enumerate all combinations of two bases, `A` and `B`, from `Apowers`. Consider the combination where `A` is `3` and `B` is `6`. Of course `gcd(3, 6) == 3`, so the program would not consider them further, but imagine if they did not share a common factor. Then we would look at all possible `Ax + By` sums, for `Ax` in `[27, 81, 243, 2187]` and `By` in `[216, 1296, 7776, 279936].` One of these would be `27 + 216`, which sums to `243`. We look up `243` in `Czroots`:", "_____no_output_____" ] ], [ [ "Czroots = make_Czroots(Apowers)\nCzroots", "_____no_output_____" ], [ "Czroots[243]", "_____no_output_____" ] ], [ [ "We see that `243` is in `Czroots`, with value `3`, so this would be a counterexample (except for the common factor). 
The program uses the `exponent` function to recover the values of `x, y, z`, and prints the results.\n\n# Is the Program Correct?\n\nCan we gain confidence in the program? It is difficult to test `beal`, because the expected output is nothing, for all known inputs.\nOne thing we can do is verify that `beal` finds cases like `3 ** 3 + 6 ** 3 == 3 ** 5 == 243` that would be a counterexample except for the common factor `3`. We can test this by temporarily replacing the `gcd` function with a mock function that always reports no common factors:", "_____no_output_____" ] ], [ [ "def gcd(a, b): return 1\n\nbeal(100, 100)", "3 ** 3 + 6 ** 3 == 3 ** 5 == 243\n7 ** 7 + 49 ** 3 == 98 ** 3 == 941192\n8 ** 4 + 16 ** 3 == 2 ** 13 == 8192\n8 ** 5 + 32 ** 3 == 16 ** 4 == 65536\n9 ** 3 + 18 ** 3 == 9 ** 4 == 6561\n16 ** 5 + 32 ** 4 == 8 ** 7 == 2097152\n17 ** 4 + 34 ** 4 == 17 ** 5 == 1419857\n19 ** 4 + 38 ** 3 == 57 ** 3 == 185193\n27 ** 3 + 54 ** 3 == 3 ** 11 == 177147\n28 ** 3 + 84 ** 3 == 28 ** 4 == 614656\n34 ** 5 + 51 ** 4 == 85 ** 4 == 52200625\n" ] ], [ [ "Let's make sure all those expressions are true:", "_____no_output_____" ] ], [ [ "{3 ** 3 + 6 ** 3 == 3 ** 5 == 243,\n 7 ** 7 + 49 ** 3 == 98 ** 3 == 941192,\n 8 ** 4 + 16 ** 3 == 2 ** 13 == 8192,\n 8 ** 5 + 32 ** 3 == 16 ** 4 == 65536,\n 9 ** 3 + 18 ** 3 == 9 ** 4 == 6561,\n 16 ** 5 + 32 ** 4 == 8 ** 7 == 2097152,\n 17 ** 4 + 34 ** 4 == 17 ** 5 == 1419857,\n 19 ** 4 + 38 ** 3 == 57 ** 3 == 185193,\n 27 ** 3 + 54 ** 3 == 3 ** 11 == 177147,\n 28 ** 3 + 84 ** 3 == 28 ** 4 == 614656,\n 34 ** 5 + 51 ** 4 == 85 ** 4 == 52200625}", "_____no_output_____" ] ], [ [ "I get nervous having an incorrect version of `gcd` around; let's change it back, quick!", "_____no_output_____" ] ], [ [ "from math import gcd\n\nbeal(100, 100)", "_____no_output_____" ] ], [ [ "We can also provide some test cases for the subfunctions of `beal`:", "_____no_output_____" ] ], [ [ "def tests():\n assert make_Apowers(6, 10) == {\n 1: [1],\n 2: [8, 16, 32, 128],\n 3: [27, 81, 243, 2187],\n 4: [64, 256, 1024, 16384],\n 5: [125, 625, 3125, 78125],\n 6: [216, 1296, 7776, 279936]}\n \n assert make_Czroots(make_Apowers(5, 8)) == {\n 1: 1, 8: 2, 16: 2, 27: 3, 32: 2, 64: 4, 81: 3,\n 125: 5, 128: 2, 243: 3, 256: 4, 625: 5, 1024: 4,\n 2187: 3, 3125: 5, 16384: 4, 78125: 5}\n Czroots = make_Czroots(make_Apowers(100, 100))\n assert 3 ** 3 + 6 ** 3 in Czroots\n assert 99 ** 97 in Czroots\n assert 101 ** 100 not in Czroots\n assert Czroots[99 ** 97] == 99\n \n assert exponent(10 ** 5, 10) == 5\n assert exponent(7 ** 3, 7) == 3\n assert exponent(1234 ** 999, 1234) == 999\n assert exponent(12345 ** 6789, 12345) == 6789\n assert exponent(3 ** 10000, 3) == 10000\n assert exponent(1, 1) == 3\n \n assert exponents_upto(2) == []\n assert exponents_upto(3) == [3]\n assert exponents_upto(4) == [3, 4]\n assert exponents_upto(40) == [3, 4, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]\n assert exponents_upto(100) == [\n 3, 4, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, \n 67, 71, 73, 79, 83, 89, 97]\n \n assert gcd(3, 6) == 3\n assert gcd(3, 7) == 1\n assert gcd(861591083269373931, 94815872265407) == 97\n assert gcd(2*3*5*(7**10)*(11**12), 3*(7**5)*(11**13)*17) == 3*(7**5)*(11**12)\n \n return 'tests pass'\n \ntests()", "_____no_output_____" ] ], [ [ "The program is mostly straightforward, but relies on the correctness of these arguments: \n\n* Are we justified in taking `combinations` without replacements from the `Apowers` table? 
In other words, are we sure there are no solutions of the form $A^x + A^x = C^z$? Yes, we can be sure, because then $2\\;A^x = C^z$, and all the factors of $A$ would also be factors of $C$.\n\n<p>\n* Are we justified in having a single value for each key in the `Czroots` table? Consider that $81 = 3^4 = 9^2$. We put `{81: 3}` in the table and discard `{81: 9}`, because any number that has 9 as a factor will always have 3 as a factor as well, so 3 is all we need to know. But what if a number could be formed with two bases where neither was a multiple of the other? For example, what if $2^7 = 5^3 = s$; then wouldn't we have to have both 2 and 5 as values for $s$ in the table? Fortunately, that can never happen, because of the [fundamental theorem of arithmetic](https://en.wikipedia.org/wiki/Fundamental_theorem_of_arithmetic).\n\n<p>\n* Could there be a rounding error involving the `exponent` function that was not caught by the tests? Possibly; but `exponent` is not used to find counterexamples, only to print them, so any such error wouldn't cause us to miss a counterexample.\n\n<p>\n* Are we justified in only considering exponents that are odd primes, or the number 4? In one sense, yes, because when we consider the two terms $A^{(qp)}$ and $(A^q)^p$, we find they are always equal, and always have the same prime factors (the factors of $A$), so for the purposes of the Beal problem, they are equivalent, and we only need consider one of them. In another sense, there is a difference. With this optimization, when we run `beal(6, 10)`, we are no longer testing $512$ as a value of $A$ or $B$, even though $512 = 2^9$ and both $2$ and $9$ are within range, because the program chooses to express $512$ as $8^3$, and $8$ is not in the specified range. So the program is still correctly searching for counterexamples, but the space that it searches for given `max_A` and `max_x` is different with this optimization.\n\n<p>\n* Are we really sure that when $A$ and $B$ have a common factor greater than 1, then\n$C$ also shares that common factor? Yes, because if $p$ is a factor of both $A$ and $B$, then it is a factor of $A^x + B^y$, and since we know this is equal to $C^z$, then $p$ must also be a factor of $C^z$, and thus a factor of $C$.\n\n", "_____no_output_____" ], [ "\n# Faster Arithmetic (mod *p*)\n\nArithmetic is slow with integers that have thousands of digits. If we want to explore much further, we'll have to make the program more efficient. An obvious improvement would be to do all the arithmetic module some number $m$. Then we know:\n\n$$\\mbox{if} ~~ \nA^x + B^y = C^z\n~~ \\mbox{then} ~~\n(A^x (\\mbox{mod} ~ m) + B^y (\\mbox{mod} ~ m)) (\\mbox{mod} ~ m) = C^z \\;(\\mbox{mod} ~ m)$$\n\n\nSo we can do efficient tests modulo $m$, and then do the full arithmetic only for combinations that work modulo $m$. Unfortunately there will be collisions (two numbers that are distinct, but are equal mod $m$), so the tables will have to have lists of values. 
Here is a simple, unoptimized implementation:", "_____no_output_____" ] ], [ [ "from math import gcd\nfrom itertools import combinations, product\nfrom collections import defaultdict\n \ndef beal_modm(max_A, max_x, m=2**31-1):\n \"\"\"See if any A ** x + B ** y equals some C ** z (mod p), with gcd(A, B) == 1.\n If so, verify that the equation works without the (mod m).\n Consider any 1 <= A,B <= max_A and x,y <= max_x, with x,y prime or 4.\"\"\"\n assert m >= max_A\n Apowers = make_Apowers_modm(max_A, max_x, m)\n Czroots = make_Czroots_modm(Apowers)\n for (A, B) in combinations(Apowers, 2):\n if gcd(A, B) == 1:\n for (Axm, x), (Bym, y) in product(Apowers[A], Apowers[B]): \n Czm = (Axm + Bym) % m\n if Czm in Czroots:\n lhs = A ** x + B ** y\n for (C, z) in Czroots[Czm]:\n if lhs == C ** z:\n print('{} ** {} + {} ** {} == {} ** {} == {}'\n .format(A, x, B, y, C, z, C ** z)) \n \n\ndef make_Apowers_modm(max_A, max_x, m): \n \"A dict of {A: [(A**3 (mod m), 3), (A**4 (mod m), 4), ...]}.\"\n exponents = exponents_upto(max_x)\n return {A: [(pow(A, x, m), x) for x in (exponents if (A != 1) else [3])]\n for A in range(1, max_A+1)}\n\ndef make_Czroots_modm(Apowers): \n \"A dict of {C**z (mod m): [(C, z),...]}\"\n Czroots = defaultdict(list)\n for A in Apowers:\n for (Axm, x) in Apowers[A]:\n Czroots[Axm].append((A, x))\n return Czroots ", "_____no_output_____" ] ], [ [ "Here we see that each entry in the `Apowers` table is a list of `(A**x (mod p), x)` pairs.\nFor example, $6^7 = 279,936$, so in our (mod 1000) table we have the pair `(936, 7)` under `6`.", "_____no_output_____" ] ], [ [ "Apowers = make_Apowers_modm(6, 10, 1000)\nApowers", "_____no_output_____" ] ], [ [ "And each item in the `Czroots` table is of the form `{C**z (mod m): [(C, z), ...]}`.\nFor example, `936: [(6, 7)]`.", "_____no_output_____" ] ], [ [ "make_Czroots_modm(Apowers)", "_____no_output_____" ] ], [ [ "Let's run the program:", "_____no_output_____" ] ], [ [ "%time beal_modm(1000, 100)", "CPU times: user 35.5 s, sys: 44.1 ms, total: 35.5 s\nWall time: 35.5 s\n" ] ], [ [ "We don't see a speedup here, but the idea is that as we start dealing with much larger integers, this version should be faster. I could improve this version by caching certain computations, managing the memory layout better, moving some computations out of loops, considering using multiple different numbers as the modulus (as in a Bloom filter), finding a way to parallelize the program, and re-coding in a faster compiled language (such as C++ or Go or Julia). Then I could invest thousands (or millions) of CPU hours searching for counterexamples. \n\nBut [Witold Jarnicki](https://plus.sandbox.google.com/+WitoldJarnicki/posts) and [David Konerding](http://www.konerding.com/~dek/) already did that: they wrote a C++ program that, in parallel across thousands of machines, searched for $A, B$ up to 200,000 and $x, y$ up to 5,000, but found no counterexamples. So I don't think it is worthwhile to continue on that path.\n\n# Conclusion\n\nThis was fun, but I can't recommend anyone spend a serious amount of computer time looking for counterexamples to the Beal Conjecture&mdash;the money you would have to spend in computer time would be more than the expected value of your prize winnings. I suggest you work on a proof rather than a counterexample, or work on some other interesting problem instead!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
| Field | Value |
|---|---|
| hexsha | c52c757d231e5e764ca8c882fa401d3a87420b71 |
| size | 31,902 |
| ext | ipynb |
| lang | Jupyter Notebook |
| max_stars_repo_path | dcorr/dcorr_investigation.ipynb |
| max_stars_repo_name | ebridge2/batch_effects |
| max_stars_repo_head_hexsha | 6051c060e852b0839b9fe463dafa2fe24a94cf61 |
| max_stars_repo_licenses | [ "Apache-2.0" ] |
| max_stars_count | 1 |
| max_stars_repo_stars_event_min_datetime | 2021-09-08T00:25:54.000Z |
| max_stars_repo_stars_event_max_datetime | 2021-09-08T00:25:54.000Z |
| max_issues_repo_path | dcorr/dcorr_investigation.ipynb |
| max_issues_repo_name | ebridge2/batch_effects |
| max_issues_repo_head_hexsha | 6051c060e852b0839b9fe463dafa2fe24a94cf61 |
| max_issues_repo_licenses | [ "Apache-2.0" ] |
| max_issues_count | 1 |
| max_issues_repo_issues_event_min_datetime | 2020-08-09T10:52:03.000Z |
| max_issues_repo_issues_event_max_datetime | 2020-08-09T10:52:03.000Z |
| max_forks_repo_path | dcorr/dcorr_investigation.ipynb |
| max_forks_repo_name | ebridge2/batch_effects |
| max_forks_repo_head_hexsha | 6051c060e852b0839b9fe463dafa2fe24a94cf61 |
| max_forks_repo_licenses | [ "Apache-2.0" ] |
| max_forks_count | 1 |
| max_forks_repo_forks_event_min_datetime | 2021-09-08T13:16:57.000Z |
| max_forks_repo_forks_event_max_datetime | 2021-09-08T13:16:57.000Z |
| avg_line_length | 36.006772 |
| max_line_length | 309 |
| alphanum_fraction | 0.409159 |
[ [ [ "from hyppo.ksample import KSample\nfrom hyppo.independence import Dcorr\nfrom combat import combat\nimport pandas as pd\nimport glob\nimport os\nimport graspy as gp\nimport numpy as np\nfrom dask.distributed import Client, progress\nimport dask.dataframe as ddf\nfrom scipy.stats import zscore, rankdata, mannwhitneyu\nimport copy\nimport math\nimport networkx as nx\nfrom graspy.models import SIEMEstimator as siem", "_____no_output_____" ], [ "def get_sub(fname):\n stext = os.path.basename(fname).split('_')\n return('{}_{}_{}'.format(stext[0], stext[1], stext[3]))\n\ndef get_sub_pheno_dat(subid, scan, pheno_dat):\n matches = pheno_dat.index[pheno_dat[\"SUBID\"] == int(subid)].tolist()\n match = np.min(matches)\n return(int(pheno_dat.iloc[match][\"SEX\"]))\n\ndef get_age_pheno_dat(subid, scan, pheno_dat):\n matches = pheno_dat.index[pheno_dat[\"SUBID\"] == int(subid)].tolist()\n match = np.min(matches)\n return(int(pheno_dat.iloc[match][\"AGE_AT_SCAN_1\"]))\n\ndef apply_along_dataset(scs, dsets, fn):\n scs_xfmd = np.zeros(scs.shape)\n for dset in np.unique(dsets):\n scs_xfmd[dsets == dset,:] = np.apply_along_axis(fn, 0, scs[dsets == dset,:])\n return(scs_xfmd)\n\ndef apply_along_individual(scs, fn):\n scs_xfmd = np.zeros(scs.shape)\n\ndef zsc(x):\n x_ch = copy.deepcopy(x)\n if (np.var(x_ch) > 0):\n x_ch = (x_ch - np.mean(x_ch))/np.std(x_ch)\n return x_ch\n else:\n return np.zeros(x_ch.shape)\n \n \ndef ptr(x):\n x_ch = copy.deepcopy(x)\n nz = x[x != 0]\n x_rank = rankdata(nz)*2/(len(nz) + 1)\n x_ch[x_ch != 0] = x_rank\n if (np.min(x_ch) != np.max(x_ch)):\n x_ch = (x_ch - np.min(x_ch))/(np.max(x_ch) - np.min(x_ch))\n return(x_ch)", "_____no_output_____" ], [ "basepath = '/mnt/nfs2/MR/cpac_3-9-2/'\npheno_basepath = '/mnt/nfs2/MR/all_mr/phenotypic/'\ndatasets = os.listdir(basepath)\ntry:\n datasets.remove(\"phenotypic\")\nexcept:\n print(\"No phenotypic folder in `datasets`.\")\nprint(datasets)", "No phenotypic folder in `datasets`.\n['UPSM1', 'BNU1', 'HNU1', 'IBATRT', 'IPCAS1', 'Utah1', 'SWU4', 'NYU2', 'UWM', 'NKI24_std2500', 'BNU2', 'BNU3', 'NYU1', 'XHCUMS', 'IPCAS5', 'IPCAS6', 'JHNU', 'IPCAS8', 'LMU3', 'DC1', 'IACAS', 'IPCAS', 'MRNTRT', 'NKI24_mx1400', 'KKI2009', 'SWU2', 'NKI24_mx645', 'SWU3', 'MPG1', 'IPCAS2', 'SWU1', 'UM']\n" ], [ "fmri_dict = {}\npheno_dat = {}\n\nfor i, dataset in enumerate(datasets):\n try:\n try:\n pheno_dat[dataset] = pd.read_csv('{}{}_phenotypic_data.csv'.format(pheno_basepath, dataset))\n except:\n raise ValueError(\"Dataset: {} does not have a phenotypic file.\".format(dataset))\n scan_dict = {}\n sex_dict = []\n age_dict = []\n dset_dir = os.path.join('{}{}/graphs/FSL_nff_nsc_gsr_des'.format(basepath, dataset), '*.ssv')\n files_ds = glob.glob(dset_dir)\n successes = len(files_ds)\n for f in files_ds:\n try:\n gr_dat = gp.utils.import_edgelist(f)\n sub = get_sub(f)\n scansub = sub.split('_')\n sex = get_sub_pheno_dat(scansub[1], scansub[2], pheno_dat[dataset])\n age = get_age_pheno_dat(scansub[1], scansub[2], pheno_dat[dataset])\n scan_dict[sub] = gr_dat.flatten()\n sex_dict.append(sex)\n age_dict.append(age)\n except Exception as e:\n successes -= 1\n print(\"Dataset: {} has {}/{} successes.\".format(dataset, successes, len(files_ds)))\n if (successes < 5):\n raise ValueError(\"Dataset: {} does not have enough successes.\".format(dataset))\n fmri_dict[dataset] = {}\n fmri_dict[dataset][\"scans\"] = np.vstack(list(scan_dict.values()))\n fmri_dict[dataset][\"subs\"] = list(scan_dict.keys())\n fmri_dict[dataset][\"sex\"] = sex_dict\n fmri_dict[dataset][\"age\"] 
= age_dict\n fmri_dict[dataset][\"dataset\"] = [i + 1 for j in range(0, fmri_dict[dataset][\"scans\"].shape[0])]\n except Exception as e:\n print(\"Error in {} Dataset.\".format(dataset))\n print(e)", "Dataset: UPSM1 has 230/230 successes.\nDataset: BNU1 has 100/100 successes.\nDataset: HNU1 has 300/300 successes.\nDataset: IBATRT has 50/50 successes.\nDataset: IPCAS1 has 60/60 successes.\nDataset: Utah1 has 52/52 successes.\nDataset: SWU4 has 466/467 successes.\nDataset: NYU2 has 4/252 successes.\nError in NYU2 Dataset.\nDataset: NYU2 does not have enough successes.\nDataset: UWM has 50/50 successes.\nError in NKI24_std2500 Dataset.\nDataset: NKI24_std2500 does not have a phenotypic file.\nDataset: BNU2 has 10/100 successes.\nDataset: BNU3 has 48/48 successes.\nDataset: NYU1 has 75/75 successes.\nDataset: XHCUMS has 115/120 successes.\nDataset: IPCAS5 has 44/44 successes.\nDataset: IPCAS6 has 30/30 successes.\nDataset: JHNU has 60/60 successes.\nDataset: IPCAS8 has 26/26 successes.\nDataset: LMU3 has 50/50 successes.\nError in DC1 Dataset.\nDataset: DC1 does not have a phenotypic file.\nDataset: IACAS has 59/59 successes.\nError in IPCAS Dataset.\nDataset: IPCAS does not have a phenotypic file.\nDataset: MRNTRT has 87/88 successes.\nError in NKI24_mx1400 Dataset.\nDataset: NKI24_mx1400 does not have a phenotypic file.\nDataset: KKI2009 has 0/42 successes.\nError in KKI2009 Dataset.\nDataset: KKI2009 does not have enough successes.\nDataset: SWU2 has 54/54 successes.\nError in NKI24_mx645 Dataset.\nDataset: NKI24_mx645 does not have a phenotypic file.\nDataset: SWU3 has 46/48 successes.\nDataset: MPG1 has 21/21 successes.\nDataset: IPCAS2 has 68/70 successes.\nDataset: SWU1 has 59/59 successes.\nDataset: UM has 160/160 successes.\n" ], [ "def run_experiment(row):\n try:\n ds1 = row[0]; ds2 = row[1]; sxfm=row[2]; dxfm = row[3]\n scans = np.vstack((fmri_dict[ds1][\"scans\"], fmri_dict[ds2][\"scans\"]))\n scans = scans[:,~np.all(scans == 0, axis=0)]\n sex = np.array(fmri_dict[ds1][\"sex\"] + fmri_dict[ds2][\"sex\"])\n age = np.array(fmri_dict[ds1][\"age\"] + fmri_dict[ds2][\"age\"])\n datasets = np.array([1 for i in range(0, fmri_dict[ds1][\"scans\"].shape[0])] + [2 for i in range(0, fmri_dict[ds2][\"scans\"].shape[0])])\n # apply per-individual transform\n if sxfm == \"ptr\":\n scans = np.apply_along_axis(ptr, 1, scans)\n # apply per-dataset edgewise transform\n if dxfm == \"raw\":\n scans = scans\n elif dxfm == \"zscore\":\n scans = apply_along_dataset(scans, datasets, zsc)\n elif dxfm == \"ptr\":\n scans = apply_along_dataset(scans, datasets, ptr)\n elif dxfm == \"combat\":\n scans = np.array(combat(pd.DataFrame(scans.T), datasets, model=None, numerical_covariates=None)).T\n try:\n eff_batch = KSample(\"DCorr\").test(scans[datasets == 1,:], scans[datasets == 2,:])\n except:\n eff_batch = (None, None)\n try:\n eff_sex = KSample(\"DCorr\").test(scans[sex == 1,:], scans[sex == 2,:])\n except:\n eff_sex = (None, None)\n try:\n eff_age = Dcorr().test(scans, age)\n except:\n eff_age = (None, None)\n except:\n eff_batch = (None, None)\n eff_sex = (None, None)\n eff_age = (None, None)\n return (row[0], row[1], row[2], row[3], eff_batch[0], eff_batch[1], eff_sex[0], eff_sex[1], eff_age[0], eff_age[1])", "_____no_output_____" ] ], [ [ "# Experiments\n\n## Effects", "_____no_output_____" ] ], [ [ "ncores = 99\nclient = Client(threads_per_worker=1, n_workers=ncores)", "/home/eric/.virtualenvs/batch/lib/python3.6/site-packages/distributed/node.py:155: UserWarning: Port 8787 is already in use.\nPerhaps 
you already have a cluster running?\nHosting the HTTP server on port 43081 instead\n http_address[\"port\"], self.http_server.port\n" ], [ "exps = []\ndatasets = list(fmri_dict.keys())\nfor sxfm in [\"raw\", \"ptr\"]:\n for i, ds1 in enumerate(datasets):\n for j in range(i+1, len(datasets)):\n for dxfm in [\"raw\", \"ptr\", \"zscore\", \"combat\"]:\n exps.append([ds1, datasets[j], sxfm, dxfm])\nsim_exps = pd.DataFrame(exps, columns=[\"Dataset1\", \"Dataset2\", \"Sxfm\", \"Dxfm\"])\nprint(sim_exps.head(n=30))", " Dataset1 Dataset2 Sxfm Dxfm\n0 UPSM1 BNU1 raw raw\n1 UPSM1 BNU1 raw ptr\n2 UPSM1 BNU1 raw zscore\n3 UPSM1 BNU1 raw combat\n4 UPSM1 HNU1 raw raw\n5 UPSM1 HNU1 raw ptr\n6 UPSM1 HNU1 raw zscore\n7 UPSM1 HNU1 raw combat\n8 UPSM1 IBATRT raw raw\n9 UPSM1 IBATRT raw ptr\n10 UPSM1 IBATRT raw zscore\n11 UPSM1 IBATRT raw combat\n12 UPSM1 IPCAS1 raw raw\n13 UPSM1 IPCAS1 raw ptr\n14 UPSM1 IPCAS1 raw zscore\n15 UPSM1 IPCAS1 raw combat\n16 UPSM1 Utah1 raw raw\n17 UPSM1 Utah1 raw ptr\n18 UPSM1 Utah1 raw zscore\n19 UPSM1 Utah1 raw combat\n20 UPSM1 SWU4 raw raw\n21 UPSM1 SWU4 raw ptr\n22 UPSM1 SWU4 raw zscore\n23 UPSM1 SWU4 raw combat\n24 UPSM1 UWM raw raw\n25 UPSM1 UWM raw ptr\n26 UPSM1 UWM raw zscore\n27 UPSM1 UWM raw combat\n28 UPSM1 BNU2 raw raw\n29 UPSM1 BNU2 raw ptr\n" ], [ "sim_exps = ddf.from_pandas(sim_exps, npartitions=ncores)\nsim_results = sim_exps.apply(lambda x: run_experiment(x), axis=1, result_type='expand',\n meta={0: str, 1: str, 2: str, 3: str, 4: float, 5: float, 6: float, 7: float,\n 8: float, 9: float})\nsim_results", "_____no_output_____" ], [ "sim_results = sim_results.compute(scheduler=\"multiprocessing\")\nsim_results = sim_results.rename(columns={0: \"Dataset1\", 1: \"Dataset2\", 2: \"Sxfm\", 3: \"Dxfm\", 4: \"Effect.Batch\",\n 5: \"pvalue.Batch\", 6: \"Effect.Sex\", 7: \"pvalue.Sex\",\n 8: \"Effect.Age\", 9: \"pvalue.Age\"})\nsim_results.to_csv('../data/dcorr/batch_results.csv')\nsim_results.head(n=20)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
| Field | Value |
|---|---|
| hexsha | c52c797a941cb5a7f4cee26bed724d40f9c6b486 |
| size | 171,440 |
| ext | ipynb |
| lang | Jupyter Notebook |
| max_stars_repo_path | mean_memory_utilization_nodes.ipynb |
| max_stars_repo_name | sara-nl/SURFsara-Trace-Archive |
| max_stars_repo_head_hexsha | fb053f241b761c7e6dd863f5b87c8e90428b1f6c |
| max_stars_repo_licenses | [ "Apache-2.0" ] |
| max_stars_count | 1 |
| max_stars_repo_stars_event_min_datetime | 2020-06-17T09:59:15.000Z |
| max_stars_repo_stars_event_max_datetime | 2020-06-17T09:59:15.000Z |
| max_issues_repo_path | mean_memory_utilization_nodes.ipynb |
| max_issues_repo_name | sara-nl/SURFace |
| max_issues_repo_head_hexsha | fb053f241b761c7e6dd863f5b87c8e90428b1f6c |
| max_issues_repo_licenses | [ "Apache-2.0" ] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | mean_memory_utilization_nodes.ipynb |
| max_forks_repo_name | sara-nl/SURFace |
| max_forks_repo_head_hexsha | fb053f241b761c7e6dd863f5b87c8e90428b1f6c |
| max_forks_repo_licenses | [ "Apache-2.0" ] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |
| avg_line_length | 313.992674 |
| max_line_length | 63,840 |
| alphanum_fraction | 0.919651 |
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nfrom plotnine import *\nfrom datetime import datetime\nimport pytz\n\n%matplotlib inline", "_____no_output_____" ], [ "memory_free = pd.read_parquet(\"path to machine metric dataset/node_memory_MemFree/\")\nmemory_free = memory_free / (1024 * 1024 * 1024)", "_____no_output_____" ], [ "memory_total = pd.read_parquet(\"path to machine metric dataset/node_memory_MemTotal/\")\nmemory_total = memory_total / (1024 * 1024 * 1024)\n# print(memory_total.values.max())\n# print(len(memory_total.columns))", "_____no_output_____" ], [ "for c in memory_total.columns:\n if memory_total[c].max() >= 257:\n print(c)", "r23n23\nr23n26\n" ], [ "mem_used_df = pd.read_parquet(\"path to machine metric dataset/node_memory_MemTotal%20-%20node_memory_MemFree%20-%20node_memory_Buffers%20-%20node_memory_Cached%20-%20node_memory_Slab%20-%20node_memory_PageTables%20-%20node_memory_SwapCached/\")\nmem_used_df = mem_used_df / (1024 * 1024 * 1024)\nprint(mem_used_df.max().max())", "2000.2234001159668\n" ], [ "# memory_used_fraction_df = 1 - (df / mem_total_df)", "_____no_output_____" ], [ "mean_memory_per_node = mem_used_df.mean() * 100\n\n# TODO: rewrite this. Devide based on index and the column names to get the true utilization levels.\nfig = plt.figure(figsize=(40,12))\nax = mean_memory_per_node.plot.hist(bins=math.ceil(mean_memory_per_node.max()))\nax.set_xlim(0,)\nax.set_xlabel(\"Mean Memory Utilization (%)\", fontsize=40)\nax.set_ylabel(\"Frequency\", fontsize=40)\nax.tick_params(axis='both', which='major', labelsize=40)\nax.tick_params(axis='both', which='minor', labelsize=32)\nax.set_title(\"Histogram of Mean Memory Utilization in LISA\", fontsize=50)\nplt.savefig(\"mean_memory_utilization_percentage.pdf\")", "_____no_output_____" ], [ "def normalize(df):\n df = df.value_counts(sort=False, normalize=True).rename_axis('target').reset_index(name='pdf')\n df[\"cdf\"] = df[\"pdf\"].cumsum()\n return df\n\nall_values_df = pd.DataFrame({\"target\": mem_used_df.values.ravel()})\ncount_df = normalize(all_values_df)\ndel all_values_df\n", "_____no_output_____" ], [ "# Add a row at the start so that the CDF starts at 0 and ends at 1 (in case we only have one datapoint in the DF)\nplot_df = count_df.copy()\nplot_df.index = plot_df.index + 1 # shifting index\nplot_df.reset_index(inplace=True)\nplot_df['index'] = plot_df['index'] + 1\nplot_df.loc[0] = [0, -math.inf, 0, 0] # add a row at the start (index, count, pdf, cdf)\nplot_df.loc[len(plot_df)] = [len(plot_df), math.inf, 1, 1]\nplot_df.sort_index(inplace=True)\n\nggplt = ggplot(plot_df) + \\\n theme_light(base_size=18) + \\\n theme(figure_size = (8,3)) + \\\n geom_step(aes(x=\"target\", y=\"cdf\"), size=1) +\\\n xlab(\"Node RAM Usage [GB]\") + \\\n ylab(\"ECDF\")\n\n\nmarker_x = [96, 192, 256, 1024, 2048]\nmarker_y = []\nfor i in marker_x:\n marker_y.append(count_df[count_df['target'].le(i) | np.isclose(count_df['target'], i, rtol=1e-10, atol=1e-12)].tail(1)['cdf'].iloc[0])\n\n\nmarker_labels = [\"96GB\", \"192GB\", \"256GB\", \"1TB\", \"2TB\"]\nmarkers = ['o', 'v', 'p', 's', 'd']\n\nfig = ggplt.draw(return_ggplot=False)\nax = plt.gca()\nfor x_pos, y_pos, marker_symbol, marker_label in zip(marker_x, marker_y, markers, marker_labels):\n ax.scatter(x_pos, y_pos, marker=marker_symbol, label=marker_label, facecolors=\"None\", edgecolors=\"black\")\nax.legend(ncol=len(marker_x), loc=4, prop={'size': 10})\n\ndate_time = 
datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")\n\nfig.tight_layout()\nfig.savefig(\"memory_usage_cdf_nodes_{}.pdf\".format(date_time))", "_____no_output_____" ], [ "count_df", "_____no_output_____" ], [ "t = '''\\\\begin{{table}}[]\n\\\\caption{{RAM usage (GB) per percentage within Lisa.}}\n\\\\label{{surfing:tbl:ram-usage-percentiles}}\n\\\\begin{{tabular}}{{@{{}}lrrrrrrr@{{}}}}\n\\\\toprule\n & 1\\\\% & 25\\\\% & 50\\\\% & 75\\\\% & 90\\\\% & 99\\\\% & 100\\\\% \\\\\\\\ \\\\midrule\nRAM & {0} & {1} & {2} & {3} & {4} & {5} & {6} \\\\\\\\ \\\\bottomrule\n\\\\end{{tabular}}\n\\\\end{{table}}'''\n\n# print(count_df[-1:]['cdf'] - 1.0)\n# print(count_df[-1:]['cdf'] >= 1.0)\n# print(count_df[-1:]['cdf'].ge(1.0))\n# print(np.isclose(count_df[-5:]['cdf'], 1.0, rtol=1e-10, atol=1e-12))\n# print(count_df[count_df['cdf'].ge(0.25) | np.isclose(count_df['cdf'], .25, rtol=1e-10, atol=1e-12)].iloc[0])\n\npercentages = [0.01, 0.25, 0.50, 0.75, 0.90, 0.99, 1]\nvalues = []\n\nfor p in percentages:\n values.append(count_df[count_df['cdf'].ge(p) | np.isclose(count_df['cdf'], p, rtol=1e-8, atol=1e-12)].iloc[0]['target'])\n\nprint(t.format(*[\"{:.2f}\".format(v) for v in values]))", "\\begin{table}[]\n\\caption{RAM usage (GB) per percentage within Lisa.}\n\\label{surfing:tbl:ram-usage-percentiles}\n\\begin{tabular}{@{}lrrrrrrr@{}}\n\\toprule\n & 1\\% & 25\\% & 50\\% & 75\\% & 90\\% & 99\\% & 100\\% \\\\ \\midrule\nRAM & 0.64 & 1.46 & 3.65 & 8.07 & 20.99 & 58.06 & 2000.22 \\\\ \\bottomrule\n\\end{tabular}\n\\end{table}\n" ], [ "t = '''\\\\begin{{table}}[]\n\\\\caption{{Fraction of RAM usage below or at specified level.}}\n\\\\label{{surfing:tbl:ram-usage-fraction}}\n\\\\begin{{tabular}}{{@{{}}lrrrrrr@{{}}}}\n\\\\toprule\n & 96GB & 192GB & 256GB & 1TB & 2TB \\\\\\\\ \\\\midrule\nPercentage & {0} & {1} & {2} & {3} & {4} \\\\\\\\ \\\\bottomrule\n\\\\end{{tabular}}\n\\\\end{{table}}'''\n\nRAMs = [96, 192, 256, 1024, 2048]\nvalues = []\n\nfor r in RAMs:\n values.append(count_df[count_df['target'].le(r) | np.isclose(count_df['target'], r, rtol=1e-10, atol=1e-12)].tail(1)['cdf'].iloc[0])\n\nprint(t.format(*[\"{:.2f}\".format(v) for v in values]))", "\\begin{table}[]\n\\caption{Fraction of RAM usage below or at specified level.}\n\\label{surfing:tbl:ram-usage-fraction}\n\\begin{tabular}{@{}lrrrrr@{}}\n\\toprule\n & 96 & 192 & 256 & & 1024 & 2048 \\\\ \\midrule\nPercentage & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 \\\\ \\bottomrule\n\\end{tabular}\n\\end{table}\n" ], [ "# This cell outputs the special node 1128 in rack 1128, the 2TB RAM node\n# Unobfuscated it is r23n23\nmemory_free_1128 = memory_free['r23n23']\nmemory_total_1128 = memory_total['r23n23']\n\ndf_1128 = memory_free_1128.to_frame(name = 'free').join(memory_total_1128.to_frame(name='total')).replace(-1, np.NaN)\n\ndf_1128['util'] = 1 - (df_1128['free'] / df_1128['total'])\ndf_1128[\"dt\"] = pd.to_datetime(df_1128.index, utc=True, unit=\"s\")\ndf_1128[\"dt\"] = df_1128[\"dt\"].dt.tz_convert(pytz.timezone('Europe/Amsterdam')).dt.tz_localize(None)\ndf_1128 = df_1128.set_index(\"dt\")", "_____no_output_____" ], [ "df_1128['util']", "_____no_output_____" ], [ "def get_converted_xticks(ax):\n \"\"\"\n :param ax:\n :return list of day and month strings\n \"\"\"\n return [pd.to_datetime(tick.get_text()).date().strftime(\"%d\\n%b\") for tick in ax.get_xticklabels()]\n\nmedian = df_1128['util'].median() * 100\nmedian_zeroes_filtered = df_1128['util'][df_1128['util'] > 0].median() * 100\navg = df_1128['util'].mean() * 100\n\nprint(median, avg)\n# 
print(df_1128['util'])\n\nfig, ax = plt.subplots(figsize=(11, 5))\nax.plot(df_1128['util'] * 100, color=\"lightcoral\")\nax.set_ylabel(\"RAM Utilization [%]\", fontsize=18)\nax.set_xlabel(\"\", fontsize=14)\nax.tick_params(axis='both', which='major', labelsize=16)\nax.tick_params(axis='both', which='minor', labelsize=16)\n\nax.axhline(avg, label=\"Average ({:.3f})\".format(avg), color=\"steelblue\", linestyle=\"solid\")\nax.axhline(median, label=\"Median ({:.3f})\".format(median), color=\"yellowgreen\", linestyle=\"dashed\")\nax.axhline(median_zeroes_filtered, label=\"Median zeros filtered ({:.3f})\".format(median_zeroes_filtered), color=\"black\", linestyle=\"dotted\")\n\nax.legend(ncol=3, prop={\"size\": 14}, bbox_to_anchor=(0.5, 1.15), loc=9)\n\nfig.tight_layout()\n# This call needs to be after the tight_layout call so that the labels have been set!\nax.set_xticklabels(get_converted_xticks(ax))\n\ndate_time = datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")\nfig.savefig(\"ram_utilization_node1128_{}.pdf\".format(date_time), bbox_inches='tight')", "0.2740170263408781 15.536085501122136\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
| Field | Value |
|---|---|
| hexsha | c52c894f9b76639e7b970857c9e83b89e0b6c656 |
| size | 18,364 |
| ext | ipynb |
| lang | Jupyter Notebook |
| max_stars_repo_path | notebooks/Dstripes/Basic/convolutional/VAE/DstripesVAE_Convolutional_reconst_1ellwlb_01psnr.ipynb |
| max_stars_repo_name | Fidan13/Generative_Models |
| max_stars_repo_head_hexsha | 2c700da53210a16f75c468ba521061106afa6982 |
| max_stars_repo_licenses | [ "MIT" ] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | notebooks/Dstripes/Basic/convolutional/VAE/DstripesVAE_Convolutional_reconst_1ellwlb_01psnr.ipynb |
| max_issues_repo_name | Fidan13/Generative_Models |
| max_issues_repo_head_hexsha | 2c700da53210a16f75c468ba521061106afa6982 |
| max_issues_repo_licenses | [ "MIT" ] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | notebooks/Dstripes/Basic/convolutional/VAE/DstripesVAE_Convolutional_reconst_1ellwlb_01psnr.ipynb |
| max_forks_repo_name | Fidan13/Generative_Models |
| max_forks_repo_head_hexsha | 2c700da53210a16f75c468ba521061106afa6982 |
| max_forks_repo_licenses | [ "MIT" ] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |
| avg_line_length | 22.477356 |
| max_line_length | 185 |
| alphanum_fraction | 0.557395 |
[ [ [ "# Settings", "_____no_output_____" ] ], [ [ "%env TF_KERAS = 1\nimport os\nsep_local = os.path.sep\n\nimport sys\nsys.path.append('..'+sep_local+'..')\nprint(sep_local)", "_____no_output_____" ], [ "os.chdir('..'+sep_local+'..'+sep_local+'..'+sep_local+'..'+sep_local+'..')\nprint(os.getcwd())", "_____no_output_____" ], [ "import tensorflow as tf\nprint(tf.__version__)", "_____no_output_____" ] ], [ [ "# Dataset loading", "_____no_output_____" ] ], [ [ "dataset_name='Dstripes'", "_____no_output_____" ], [ "import tensorflow as tf", "_____no_output_____" ], [ "train_ds = tf.data.Dataset.from_generator(\n lambda: training_generator, \n output_types=tf.float32 ,\n output_shapes=tf.TensorShape((batch_size, ) + image_size)\n)\n\ntest_ds = tf.data.Dataset.from_generator(\n lambda: testing_generator, \n output_types=tf.float32 ,\n output_shapes=tf.TensorShape((batch_size, ) + image_size)\n)", "_____no_output_____" ], [ "_instance_scale=1.0\nfor data in train_ds:\n _instance_scale = float(data[0].numpy().max())\n break", "_____no_output_____" ], [ "_instance_scale", "_____no_output_____" ], [ "import numpy as np\nfrom collections.abc import Iterable", "_____no_output_____" ], [ "if isinstance(inputs_shape, Iterable):\n _outputs_shape = np.prod(inputs_shape)", "_____no_output_____" ], [ "_outputs_shape", "_____no_output_____" ] ], [ [ "# Model's Layers definition", "_____no_output_____" ] ], [ [ "units=20\nc=50\nmenc_lays = [\n tf.keras.layers.Conv2D(filters=units//2, kernel_size=3, strides=(2, 2), activation='relu'),\n tf.keras.layers.Conv2D(filters=units*9//2, kernel_size=3, strides=(2, 2), activation='relu'),\n tf.keras.layers.Flatten(),\n # No activation\n tf.keras.layers.Dense(latents_dim)\n]\n\nvenc_lays = [\n tf.keras.layers.Conv2D(filters=units//2, kernel_size=3, strides=(2, 2), activation='relu'),\n tf.keras.layers.Conv2D(filters=units*9//2, kernel_size=3, strides=(2, 2), activation='relu'),\n tf.keras.layers.Flatten(),\n # No activation\n tf.keras.layers.Dense(latents_dim)\n]\n\ndec_lays = [\n tf.keras.layers.Dense(units=units*c*c, activation=tf.nn.relu),\n tf.keras.layers.Reshape(target_shape=(c , c, units)),\n tf.keras.layers.Conv2DTranspose(filters=units, kernel_size=3, strides=(2, 2), padding=\"SAME\", activation='relu'),\n tf.keras.layers.Conv2DTranspose(filters=units*3, kernel_size=3, strides=(2, 2), padding=\"SAME\", activation='relu'),\n \n # No activation\n tf.keras.layers.Conv2DTranspose(filters=3, kernel_size=3, strides=(1, 1), padding=\"SAME\")\n]", "_____no_output_____" ] ], [ [ "# Model definition", "_____no_output_____" ] ], [ [ "model_name = dataset_name+'VAE_Convolutional_reconst_1ell_01psnr'\nexperiments_dir='experiments'+sep_local+model_name", "_____no_output_____" ], [ "from training.autoencoding_basic.autoencoders.VAE import VAE as AE", "_____no_output_____" ], [ "inputs_shape=image_size", "_____no_output_____" ], [ "variables_params = \\\n[\n {\n 'name': 'inference_mean', \n 'inputs_shape':inputs_shape,\n 'outputs_shape':latents_dim,\n 'layers': menc_lays\n }\n\n ,\n \n {\n 'name': 'inference_logvariance', \n 'inputs_shape':inputs_shape,\n 'outputs_shape':latents_dim,\n 'layers': venc_lays\n }\n\n ,\n \n {\n 'name': 'generative', \n 'inputs_shape':latents_dim,\n 'outputs_shape':inputs_shape,\n 'layers':dec_lays\n }\n]", "_____no_output_____" ], [ "from utils.data_and_files.file_utils import create_if_not_exist", "_____no_output_____" ], [ "_restore = os.path.join(experiments_dir, 'var_save_dir')", "_____no_output_____" ], [ 
"create_if_not_exist(_restore)\n_restore", "_____no_output_____" ], [ "#to restore trained model, set filepath=_restore", "_____no_output_____" ], [ "ae = AE( \n name=model_name,\n latents_dim=latents_dim,\n batch_size=batch_size,\n variables_params=variables_params, \n filepath=None\n )", "_____no_output_____" ], [ "from evaluation.quantitive_metrics.peak_signal_to_noise_ratio import prepare_psnr\nfrom statistical.losses_utilities import similarty_to_distance\nfrom statistical.ae_losses import expected_loglikelihood_with_lower_bound as ellwlb", "_____no_output_____" ], [ "ae.compile(loss={'x_logits': lambda x_true, x_logits: ellwlb(x_true, x_logits)+ 0.1*similarity_to_distance(prepare_psnr([ae.batch_size]+ae.get_inputs_shape()))(x_true, x_logits)})", "_____no_output_____" ] ], [ [ "# Callbacks", "_____no_output_____" ] ], [ [ "\nfrom training.callbacks.sample_generation import SampleGeneration\nfrom training.callbacks.save_model import ModelSaver", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "es = tf.keras.callbacks.EarlyStopping(\n monitor='loss', \n min_delta=1e-12, \n patience=12, \n verbose=1, \n restore_best_weights=False\n)", "_____no_output_____" ], [ "ms = ModelSaver(filepath=_restore)", "_____no_output_____" ], [ "csv_dir = os.path.join(experiments_dir, 'csv_dir')\ncreate_if_not_exist(csv_dir)\ncsv_dir = os.path.join(csv_dir, ae.name+'.csv')\ncsv_log = tf.keras.callbacks.CSVLogger(csv_dir, append=True)\ncsv_dir", "_____no_output_____" ], [ "image_gen_dir = os.path.join(experiments_dir, 'image_gen_dir')\ncreate_if_not_exist(image_gen_dir)", "_____no_output_____" ], [ "sg = SampleGeneration(latents_shape=latents_dim, filepath=image_gen_dir, gen_freq=5, save_img=True, gray_plot=False)", "_____no_output_____" ] ], [ [ "# Model Training", "_____no_output_____" ] ], [ [ "ae.fit(\n x=train_ds,\n input_kw=None,\n steps_per_epoch=int(1e4),\n epochs=int(1e6), \n verbose=2,\n callbacks=[ es, ms, csv_log, sg],\n workers=-1,\n use_multiprocessing=True,\n validation_data=test_ds,\n validation_steps=int(1e4)\n)", "_____no_output_____" ] ], [ [ "# Model Evaluation", "_____no_output_____" ], [ "## inception_score", "_____no_output_____" ] ], [ [ "from evaluation.generativity_metrics.inception_metrics import inception_score", "_____no_output_____" ], [ "is_mean, is_sigma = inception_score(ae, tolerance_threshold=1e-6, max_iteration=200)\nprint(f'inception_score mean: {is_mean}, sigma: {is_sigma}')", "_____no_output_____" ] ], [ [ "## Frechet_inception_distance", "_____no_output_____" ] ], [ [ "from evaluation.generativity_metrics.inception_metrics import frechet_inception_distance", "_____no_output_____" ], [ "fis_score = frechet_inception_distance(ae, training_generator, tolerance_threshold=1e-6, max_iteration=10, batch_size=32)\nprint(f'frechet inception distance: {fis_score}')", "_____no_output_____" ] ], [ [ "## perceptual_path_length_score", "_____no_output_____" ] ], [ [ "from evaluation.generativity_metrics.perceptual_path_length import perceptual_path_length_score", "_____no_output_____" ], [ "ppl_mean_score = perceptual_path_length_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200, batch_size=32)\nprint(f'perceptual path length score: {ppl_mean_score}')", "_____no_output_____" ] ], [ [ "## precision score", "_____no_output_____" ] ], [ [ "from evaluation.generativity_metrics.precision_recall import precision_score", "_____no_output_____" ], [ "_precision_score = precision_score(ae, training_generator, tolerance_threshold=1e-6, 
max_iteration=200)\nprint(f'precision score: {_precision_score}')", "_____no_output_____" ] ], [ [ "## recall score", "_____no_output_____" ] ], [ [ "from evaluation.generativity_metrics.precision_recall import recall_score", "_____no_output_____" ], [ "_recall_score = recall_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200)\nprint(f'recall score: {_recall_score}')", "_____no_output_____" ] ], [ [ "# Image Generation", "_____no_output_____" ], [ "## image reconstruction", "_____no_output_____" ], [ "### Training dataset", "_____no_output_____" ] ], [ [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "from training.generators.image_generation_testing import reconstruct_from_a_batch", "_____no_output_____" ], [ "from utils.data_and_files.file_utils import create_if_not_exist\nsave_dir = os.path.join(experiments_dir, 'reconstruct_training_images_like_a_batch_dir')\ncreate_if_not_exist(save_dir)\n\nreconstruct_from_a_batch(ae, training_generator, save_dir)", "_____no_output_____" ], [ "from utils.data_and_files.file_utils import create_if_not_exist\nsave_dir = os.path.join(experiments_dir, 'reconstruct_testing_images_like_a_batch_dir')\ncreate_if_not_exist(save_dir)\n\nreconstruct_from_a_batch(ae, testing_generator, save_dir)", "_____no_output_____" ] ], [ [ "## with Randomness", "_____no_output_____" ] ], [ [ "from training.generators.image_generation_testing import generate_images_like_a_batch", "_____no_output_____" ], [ "from utils.data_and_files.file_utils import create_if_not_exist\nsave_dir = os.path.join(experiments_dir, 'generate_training_images_like_a_batch_dir')\ncreate_if_not_exist(save_dir)\n\ngenerate_images_like_a_batch(ae, training_generator, save_dir)", "_____no_output_____" ], [ "from utils.data_and_files.file_utils import create_if_not_exist\nsave_dir = os.path.join(experiments_dir, 'generate_testing_images_like_a_batch_dir')\ncreate_if_not_exist(save_dir)\n\ngenerate_images_like_a_batch(ae, testing_generator, save_dir)", "_____no_output_____" ] ], [ [ "### Complete Randomness", "_____no_output_____" ] ], [ [ "from training.generators.image_generation_testing import generate_images_randomly", "_____no_output_____" ], [ "from utils.data_and_files.file_utils import create_if_not_exist\nsave_dir = os.path.join(experiments_dir, 'random_synthetic_dir')\ncreate_if_not_exist(save_dir)\n\ngenerate_images_randomly(ae, save_dir)", "_____no_output_____" ], [ "from training.generators.image_generation_testing import interpolate_a_batch", "_____no_output_____" ], [ "from utils.data_and_files.file_utils import create_if_not_exist\nsave_dir = os.path.join(experiments_dir, 'interpolate_dir')\ncreate_if_not_exist(save_dir)\n\ninterpolate_a_batch(ae, testing_generator, save_dir)", "100%|██████████| 15/15 [00:00<00:00, 19.90it/s]\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
c52c9c10d61cb4fa41768eb38f68c6d6fb334974
8,473
ipynb
Jupyter Notebook
docs/examples/tutorial/model_selection/02_model_selector_result_basic.ipynb
ambader/hcrystalball
713636e698d9a260fab982764fce4a13699be1a8
[ "MIT" ]
139
2020-06-29T16:36:16.000Z
2022-01-25T21:49:10.000Z
docs/examples/tutorial/model_selection/02_model_selector_result_basic.ipynb
ambader/hcrystalball
713636e698d9a260fab982764fce4a13699be1a8
[ "MIT" ]
34
2020-06-29T12:31:26.000Z
2022-03-18T13:56:21.000Z
docs/examples/tutorial/model_selection/02_model_selector_result_basic.ipynb
ambader/hcrystalball
713636e698d9a260fab982764fce4a13699be1a8
[ "MIT" ]
28
2020-06-30T06:00:39.000Z
2022-03-18T13:27:58.000Z
22.007792
242
0.561784
[ [ [ "# Inspecting ModelSelectorResult\nWhen we go down from multiple time-series to single time-series, the best way how to get access to all relevant information to use/access `ModelSelectorResult` objects", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport matplotlib.pyplot as plt\nplt.style.use('seaborn')\nplt.rcParams['figure.figsize'] = [12, 6]", "_____no_output_____" ], [ "from hcrystalball.model_selection import ModelSelector\nfrom hcrystalball.utils import get_sales_data\nfrom hcrystalball.wrappers import get_sklearn_wrapper\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.ensemble import RandomForestRegressor", "_____no_output_____" ], [ "df = get_sales_data(n_dates=365*2, \n n_assortments=1, \n n_states=1, \n n_stores=2)\ndf.head()", "_____no_output_____" ], [ "# let's start simple\ndf_minimal = df[['Sales']]", "_____no_output_____" ], [ "ms_minimal = ModelSelector(frequency='D', horizon=10)", "_____no_output_____" ], [ "ms_minimal.create_gridsearch(\n n_splits=2,\n between_split_lag=None,\n sklearn_models=False,\n sklearn_models_optimize_for_horizon=False,\n autosarimax_models=False,\n prophet_models=False,\n tbats_models=False,\n exp_smooth_models=False,\n average_ensembles=False,\n stacking_ensembles=False)", "_____no_output_____" ], [ "ms_minimal.add_model_to_gridsearch(get_sklearn_wrapper(LinearRegression, hcb_verbose=False))\nms_minimal.add_model_to_gridsearch(get_sklearn_wrapper(RandomForestRegressor, random_state=42, hcb_verbose=False))", "_____no_output_____" ], [ "ms_minimal.select_model(df=df_minimal, target_col_name='Sales')", "_____no_output_____" ] ], [ [ "## Ways to access ModelSelectorResult\n\nThere are three ways how you can get to single time-series result level.\n\n- First is over `.results[i]`, which is fast, but does not ensure, that results are loaded in the same order as when they were created (reason for that is hash used in the name of each result, that are later read in alphabetic order)\n- Second and third uses `.get_result_for_partition()` through `dict` based partition \n- Forth does that using `partition_hash` (also in results file name if persisted)", "_____no_output_____" ] ], [ [ "result = ms_minimal.results[0]\nresult = ms_minimal.get_result_for_partition({'no_partition_label': ''})\nresult = ms_minimal.get_result_for_partition(ms_minimal.partitions[0])\nresult = ms_minimal.get_result_for_partition('fb452abd91f5c3bcb8afa4162c6452c2')", "_____no_output_____" ] ], [ [ "## ModelSelectorResult is rich\nAs you can see below, we try to store all relevant information to enable easy access to data, that is otherwise very lenghty.", "_____no_output_____" ] ], [ [ "result", "_____no_output_____" ] ], [ [ "### Traning data", "_____no_output_____" ] ], [ [ "result.X_train", "_____no_output_____" ], [ "result.y_train", "_____no_output_____" ] ], [ [ "### Data behind plots\nReady to be plotted or adjusted to your needs", "_____no_output_____" ] ], [ [ "result.df_plot", "_____no_output_____" ], [ "result.df_plot.tail(50).plot();", "_____no_output_____" ], [ "result", "_____no_output_____" ] ], [ [ "## Best Model Metadata\nThat can help to filter for example `cv_data` or to get a glimpse on which parameters the best model has", "_____no_output_____" ] ], [ [ "result.best_model_hash", "_____no_output_____" ], [ "result.best_model_name", "_____no_output_____" ], [ "result.best_model_repr", "_____no_output_____" ] ], [ [ "### CV Results\nGet information about how our model behaved in cross validation", "_____no_output_____" ] ], [ [ 
"result.best_model_cv_results['mean_fit_time']", "_____no_output_____" ] ], [ [ "Or how all the models behaved", "_____no_output_____" ] ], [ [ "result.cv_results.sort_values('rank_test_score').head()", "_____no_output_____" ] ], [ [ "### CV Data\nAccess predictions made during cross validation with possible cv splits and true target values", "_____no_output_____" ] ], [ [ "result.cv_data.head()", "_____no_output_____" ], [ "result.cv_data.drop(['split'], axis=1).plot();", "_____no_output_____" ], [ "result.best_model_cv_data.head()", "_____no_output_____" ], [ "result.best_model_cv_data.plot();", "_____no_output_____" ] ], [ [ "## Plotting Functions\nWith `**plot_params` that you can pass depending on your plotting backend ", "_____no_output_____" ] ], [ [ "result.plot_result(plot_from='2015-06', title='Performance', color=['blue','green']);", "_____no_output_____" ], [ "result.plot_error(title='Error');", "_____no_output_____" ] ], [ [ "## Convenient Persist Method", "_____no_output_____" ] ], [ [ "result.persist?", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
c52cafbf8994e9d8362c805a350bed67041757ac
23,724
ipynb
Jupyter Notebook
content/03-python-numpy/numpy-basics.ipynb
sbu-phy-ast-reu/reu-python-tutorial
7bac6d87d4528a45e2169020266d92fc1a47dc70
[ "BSD-3-Clause" ]
3
2020-05-07T03:11:10.000Z
2020-06-19T18:17:41.000Z
content/03-python-numpy/numpy-basics.ipynb
sbu-phy-ast-reu/reu-python-tutorial
7bac6d87d4528a45e2169020266d92fc1a47dc70
[ "BSD-3-Clause" ]
null
null
null
content/03-python-numpy/numpy-basics.ipynb
sbu-phy-ast-reu/reu-python-tutorial
7bac6d87d4528a45e2169020266d92fc1a47dc70
[ "BSD-3-Clause" ]
5
2020-05-21T07:03:28.000Z
2022-01-09T08:15:03.000Z
20.774081
282
0.525797
[ [ [ "# NumPy", "_____no_output_____" ], [ "this notebook is based on the SciPy NumPy tutorial", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-warning\">\nNote that the traditional way to import numpy is to rename it np. This saves on typing and makes your code a little more compact.</div>", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ] ], [ [ "NumPy provides a multidimensional array class as well as a _large_ number of functions that operate on arrays.\n\nNumPy arrays allow you to write fast (optimized) code that works on arrays of data. To do this, there are some restrictions on arrays:\n\n* all elements are of the same data type (e.g. float)\n* the size of the array is fixed in memory, and specified when you create the array (e.g., you cannot grow the array like you do with lists)\n\nThe nice part is that arithmetic operations work on entire arrays&mdash;this means that you can avoid writing loops in python (which tend to be slow). Instead the \"looping\" is done in the underlying compiled code", "_____no_output_____" ], [ "## Array Creation and Properties", "_____no_output_____" ], [ "There are a lot of ways to create arrays. Let's look at a few\n\nHere we create an array using `arange` and then change its shape to be 3 rows and 5 columns. Note the _row-major ordering_&mdash;you'll see that the rows are together in the inner `[]` (more on this in a bit)", "_____no_output_____" ] ], [ [ "a = np.arange(15)", "_____no_output_____" ], [ "a", "_____no_output_____" ], [ "a = np.arange(15).reshape(3,5)\n\nprint(a)", "_____no_output_____" ] ], [ [ "an array is an object of the `ndarray` class, and it has methods that know how to work on the array data. Here, `.reshape()` is one of the many methods.", "_____no_output_____" ], [ "A NumPy array has a lot of meta-data associated with it describing its shape, datatype, etc.", "_____no_output_____" ] ], [ [ "print(a.ndim)\nprint(a.shape)\nprint(a.size)\nprint(a.dtype)\nprint(a.itemsize)\nprint(type(a))", "_____no_output_____" ], [ "help(a)", "_____no_output_____" ] ], [ [ "we can also create an array from a list", "_____no_output_____" ] ], [ [ "b = np.array( [1.0, 2.0, 3.0, 4.0] )\nprint(b)\nprint(b.dtype)\nprint(type(b))", "_____no_output_____" ] ], [ [ "we can create a multi-dimensional array of a specified size initialized all to 0 easily, using the `zeros()` function.", "_____no_output_____" ] ], [ [ "b = np.zeros((10,8))\nb", "_____no_output_____" ] ], [ [ "There is also an analogous ones() and empty() array routine. Note that here we can explicitly set the datatype for the array in this function if we wish. \n\nUnlike lists in python, all of the elements of a numpy array are of the same datatype", "_____no_output_____" ] ], [ [ "c = np.eye(10, dtype=np.float64)\nc", "_____no_output_____" ] ], [ [ "`linspace` creates an array with evenly space numbers. The `endpoint` optional argument specifies whether the upper range is in the array or not", "_____no_output_____" ] ], [ [ "d = np.linspace(0, 1, 10, endpoint=False)\nprint(d)", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-block alert-info\"><h3><span class=\"fa fa-flash\"></span> Quick Exercise:</h3>\n\nAnalogous to `linspace()`, there is a `logspace()` function that creates an array with elements equally spaced in log. 
Use `help(np.logspace)` to see the arguments, and create an array with 10 elements from $10^{-6}$ to $10^3$.\n\n</div>", "_____no_output_____" ], [ "we can also initialize an array based on a function", "_____no_output_____" ] ], [ [ "f = np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)\nf", "_____no_output_____" ] ], [ [ "## Array Operations", "_____no_output_____" ], [ "most operations (`+`, `-`, `*`, `/`) will work on an entire array at once, element-by-element.\n\nNote that that the multiplication operator is not a matrix multiply (there is a new operator in python 3.5+, `@`, to do matrix multiplicaiton.\n\nLet's create a simply array to start with", "_____no_output_____" ] ], [ [ "a = np.arange(12).reshape(3,4)\nprint(a)", "_____no_output_____" ] ], [ [ "Multiplication by a scalar multiplies every element", "_____no_output_____" ] ], [ [ "a*2", "_____no_output_____" ] ], [ [ "adding two arrays adds element-by-element", "_____no_output_____" ] ], [ [ "a + a", "_____no_output_____" ] ], [ [ "multiplying two arrays multiplies element-by-element", "_____no_output_____" ] ], [ [ "a*a", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-block alert-info\"><h3><span class=\"fa fa-flash\"></span> Quick Exercise:</h3>\n\n\nWhat do you think `1./a` will do? Try it and see\n\n</div>", "_____no_output_____" ], [ "We can think of our 2-d array as a 3 x 4 matrix (3 rows, 4 columns). We can take the transpose to geta 4 x 3 matrix, and then we can do a matrix multiplication", "_____no_output_____" ] ], [ [ "b = a.transpose()\nb", "_____no_output_____" ], [ "a @ b", "_____no_output_____" ] ], [ [ "We can sum the elements of an array:", "_____no_output_____" ] ], [ [ "a.sum()", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-block alert-info\"><h3><span class=\"fa fa-flash\"></span> Quick Exercise:</h3>\n\n`sum()` takes an optional argument, `axis=N`, where `N` is the axis to sum over. Sum the elements of `a` across rows to create an array with just the sum along each column of `a`.\n\n</div>", "_____no_output_____" ], [ "We can also easily get the extrema", "_____no_output_____" ] ], [ [ "print(a.min(), a.max())", "_____no_output_____" ] ], [ [ "## Universal functions", "_____no_output_____" ], [ "universal functions work element-by-element. Let's create a new array scaled by `pi`", "_____no_output_____" ] ], [ [ "b = a*np.pi/12.0\nprint(b)", "_____no_output_____" ], [ "c = np.cos(b)\nprint(c)", "_____no_output_____" ], [ "d = b + c ", "_____no_output_____" ], [ "print(d)", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-block alert-info\"><h3><span class=\"fa fa-flash\"></span> Quick Exercise:</h3>\n\nWe will often want to write our own function that operates on an array and returns a new array. We can do this just like we did with functions previously&mdash;the key is to use the methods from the `np` module to do any operations, since they work on, and return, arrays.\n\nWrite a simple function that returns $\\sin(2\\pi x)$ for an input array `x`. Then test it \nby passing in an array `x` that you create via `linspace()`\n\n</div>", "_____no_output_____" ], [ "## Slicing", "_____no_output_____" ], [ "slicing works very similarly to how we saw with strings. Remember, python uses 0-based indexing\n\n![](slicing.png)\n\nLet's create this array from the image:", "_____no_output_____" ] ], [ [ "a = np.arange(9)\na", "_____no_output_____" ] ], [ [ "Now look at accessing a single element vs. 
a range (using slicing)", "_____no_output_____" ], [ "Giving a single (0-based) index just references a single value", "_____no_output_____" ] ], [ [ "a[2]", "_____no_output_____" ] ], [ [ "Giving a range uses the range of the edges to return the values", "_____no_output_____" ] ], [ [ "print(a[2:3])", "_____no_output_____" ], [ "a[2:4]", "_____no_output_____" ] ], [ [ "The `:` can be used to specify all of the elements in that dimension", "_____no_output_____" ] ], [ [ "a[:]", "_____no_output_____" ] ], [ [ "## Multidimensional Arrays\n\nMultidimensional arrays are stored in a contiguous space in memory -- this means that the columns / rows need to be unraveled (flattened) so that it can be thought of as a single one-dimensional array. Different programming languages do this via different conventions:\n\n![](row_column_major.png)\n\nStorage order:\n\n* Python/C use *row-major* storage: rows are stored one after the other\n* Fortran/matlab use *column-major* storage: columns are stored one after another\n\nThe ordering matters when \n\n* passing arrays between languages (we'll talk about this later this semester)\n* looping over arrays -- you want to access elements that are next to one-another in memory\n * e.g, in Fortran:\n ```\n double precision :: A(M,N)\n do j = 1, N\n do i = 1, M\n A(i,j) = …\n enddo\n enddo\n ```\n \n * in C\n ```\n double A[M][N];\n for (i = 0; i < M; i++) {\n for (j = 0; j < N; j++) {\n A[i][j] = …\n }\n } \n ```\n \n\nIn python, using NumPy, we'll try to avoid explicit loops over elements as much as possible\n\nLet's look at multidimensional arrays:", "_____no_output_____" ] ], [ [ "a = np.arange(15).reshape(3,5)\na", "_____no_output_____" ] ], [ [ "Notice that the output of `a` shows the row-major storage. The rows are grouped together in the inner `[...]`\n\nGiving a single index (0-based) for each dimension just references a single value in the array", "_____no_output_____" ] ], [ [ "a[1,1]", "_____no_output_____" ] ], [ [ "Doing slices will access a range of elements. Think of the start and stop in the slice as referencing the left-edge of the slots in the array.", "_____no_output_____" ] ], [ [ "a[0:2,0:2]", "_____no_output_____" ] ], [ [ "Access a specific column", "_____no_output_____" ] ], [ [ "a[:,1]", "_____no_output_____" ] ], [ [ "Sometimes we want a one-dimensional view into the array -- here we see the memory layout (row-major) more explicitly", "_____no_output_____" ] ], [ [ "a.flatten()", "_____no_output_____" ] ], [ [ "we can also iterate -- this is done over the first axis (rows)", "_____no_output_____" ] ], [ [ "for row in a:\n print(row)", "_____no_output_____" ] ], [ [ "or element by element", "_____no_output_____" ] ], [ [ "for e in a.flat:\n print(e)", "_____no_output_____" ] ], [ [ "Generally speaking, we want to avoid looping over the elements of an array in python&mdash;this is slow. Instead we want to write and use functions that operate on the entire array at once.", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\"><h3><span class=\"fa fa-flash\"></span> Quick Exercise:</h3>\n\nConsider the array defined as:\n```\n \n q = np.array([[1, 2, 3, 2, 1],\n [2, 4, 4, 4, 2],\n [3, 4, 4, 4, 3],\n [2, 4, 4, 4, 2],\n [1, 2, 3, 2, 1]])\n\n```\n \n\n * using slice notation, create an array that consists of only the `4`'s in `q` (this will be called a _view_, as we'll see shortly)\n * zero out all of the elements in your view\n * how does `q` change?\n</div>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
c52cb27cbc62208c5eaf8d0d1982a4e5571138a2
245,295
ipynb
Jupyter Notebook
lab2/ADZD-2-SQL-Intro.ipynb
FrozenTear7/big-data-analysis
83b1d10e7ba7b494e520b4df3f2280395af10738
[ "MIT" ]
null
null
null
lab2/ADZD-2-SQL-Intro.ipynb
FrozenTear7/big-data-analysis
83b1d10e7ba7b494e520b4df3f2280395af10738
[ "MIT" ]
null
null
null
lab2/ADZD-2-SQL-Intro.ipynb
FrozenTear7/big-data-analysis
83b1d10e7ba7b494e520b4df3f2280395af10738
[ "MIT" ]
null
null
null
67.57438
40,472
0.573501
[ [ [ "# Introduction to Spark", "_____no_output_____" ], [ "## Basic initialization", "_____no_output_____" ], [ "`SparkSession` is used to connect to the Spark Cluster.", "_____no_output_____" ] ], [ [ "from pyspark.sql import SparkSession", "_____no_output_____" ] ], [ [ "We will use Pandas to operate on the reduced data in the *driver program*.", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ] ], [ [ "Numpy will be always useful.", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ] ], [ [ "Create a new session (or reuse an existing one).", "_____no_output_____" ] ], [ [ "spark = SparkSession.builder.getOrCreate()", "_____no_output_____" ], [ "spark", "_____no_output_____" ] ], [ [ "We can see that the session is established.", "_____no_output_____" ], [ "## Creating Spark Data Frames from Pandas", "_____no_output_____" ], [ "We can list the tables in our Spark Session, currently empty.", "_____no_output_____" ] ], [ [ "print(spark.catalog.listTables())", "[]\n" ] ], [ [ "We can create a Pandas `DataFrame` with random values.", "_____no_output_____" ] ], [ [ "pd_temp = pd.DataFrame(np.random.random(100))", "_____no_output_____" ] ], [ [ "We can see on the plot that it is really random:", "_____no_output_____" ] ], [ [ "import matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.style.use('ggplot')", "_____no_output_____" ], [ "pd_temp.plot()", "_____no_output_____" ] ], [ [ "Now we can convert it into Spark DataFrame:", "_____no_output_____" ] ], [ [ "spark_temp = spark.createDataFrame(pd_temp)", "_____no_output_____" ] ], [ [ "`createOrReplaceTempView` creates (or replaces if that view name already exists) a lazily evaluated \"view\" that you can then use like a table in Spark SQL. \n\nIt does not persist to memory unless you cache (persist) the dataset that underpins the view.", "_____no_output_____" ] ], [ [ "spark_temp.createOrReplaceTempView(\"temp\")", "_____no_output_____" ] ], [ [ "The created view is `TEMPORARY` which means it is not persistent.", "_____no_output_____" ] ], [ [ "print(spark.catalog.listTables())", "[Table(name='temp', database=None, description=None, tableType='TEMPORARY', isTemporary=True)]\n" ], [ "spark_temp.show()", "+--------------------+\n| 0|\n+--------------------+\n| 0.8372778529061318|\n| 0.44948093498473807|\n| 0.0675589353725441|\n|0.040578039567716884|\n| 0.9755184560479134|\n| 0.9500498172509029|\n| 0.7574676736686285|\n| 0.6298467913121013|\n| 0.9830733714672817|\n| 0.03282132742647337|\n| 0.7107727636448032|\n| 0.3513737450926274|\n| 0.3104506219214357|\n| 0.3984917305438903|\n| 0.2865188207682504|\n| 0.3130604133425421|\n| 0.8923663508811953|\n| 0.7426516063239591|\n| 0.20143228520179135|\n| 0.6407798977991666|\n+--------------------+\nonly showing top 20 rows\n\n" ] ], [ [ "We can now use transformations on this DataFrame. 
The transformations are translated (compiled) to RDD transformations.", "_____no_output_____" ] ], [ [ "from pyspark.sql.functions import col, asc", "_____no_output_____" ], [ "spark_temp.filter((col('0') > 0.9)).show()", "+------------------+\n| 0|\n+------------------+\n|0.9755184560479134|\n|0.9500498172509029|\n|0.9830733714672817|\n|0.9759206269467092|\n|0.9281935402318383|\n|0.9553961356215881|\n| 0.920919099141094|\n|0.9809113193737522|\n|0.9466164994994961|\n| 0.93950295232454|\n|0.9441475736361794|\n|0.9877151117607678|\n|0.9055881963995968|\n|0.9597778057536227|\n+------------------+\n\n" ] ], [ [ "## Creating Spark Data Frames from input files", "_____no_output_____" ] ], [ [ "file_path = \"airports.csv\"\n\n# Read in the airports data\nairports = spark.read.csv(file_path,header=True)\n\n# Show the data\nprint(airports.show())", "+---------+--------------------+-------------+-----+-------+--------+----------+\n|IATA_CODE| AIRPORT| CITY|STATE|COUNTRY|LATITUDE| LONGITUDE|\n+---------+--------------------+-------------+-----+-------+--------+----------+\n| ABE|Lehigh Valley Int...| Allentown| PA| USA|40.65236| -75.44040|\n| ABI|Abilene Regional ...| Abilene| TX| USA|32.41132| -99.68190|\n| ABQ|Albuquerque Inter...| Albuquerque| NM| USA|35.04022|-106.60919|\n| ABR|Aberdeen Regional...| Aberdeen| SD| USA|45.44906| -98.42183|\n| ABY|Southwest Georgia...| Albany| GA| USA|31.53552| -84.19447|\n| ACK|Nantucket Memoria...| Nantucket| MA| USA|41.25305| -70.06018|\n| ACT|Waco Regional Air...| Waco| TX| USA|31.61129| -97.23052|\n| ACV| Arcata Airport|Arcata/Eureka| CA| USA|40.97812|-124.10862|\n| ACY|Atlantic City Int...|Atlantic City| NJ| USA|39.45758| -74.57717|\n| ADK| Adak Airport| Adak| AK| USA|51.87796|-176.64603|\n| ADQ| Kodiak Airport| Kodiak| AK| USA|57.74997|-152.49386|\n| AEX|Alexandria Intern...| Alexandria| LA| USA|31.32737| -92.54856|\n| AGS|Augusta Regional ...| Augusta| GA| USA|33.36996| -81.96450|\n| AKN| King Salmon Airport| King Salmon| AK| USA|58.67680|-156.64922|\n| ALB|Albany Internatio...| Albany| NY| USA|42.74812| -73.80298|\n| ALO|Waterloo Regional...| Waterloo| IA| USA|42.55708| -92.40034|\n| AMA|Rick Husband Amar...| Amarillo| TX| USA|35.21937|-101.70593|\n| ANC|Ted Stevens Ancho...| Anchorage| AK| USA|61.17432|-149.99619|\n| APN|Alpena County Reg...| Alpena| MI| USA|45.07807| -83.56029|\n| ASE|Aspen-Pitkin Coun...| Aspen| CO| USA|39.22316|-106.86885|\n+---------+--------------------+-------------+-----+-------+--------+----------+\nonly showing top 20 rows\n\nNone\n" ] ], [ [ "It may be useful to convert them to Pandas for quick browsing. 
\n\n**Warning!** This is not efficient for large datasets, as it requires performing actions on the dataset.", "_____no_output_____" ] ], [ [ "airports.toPandas()", "_____no_output_____" ] ], [ [ "### Running SQL queries on dataframes", "_____no_output_____" ] ], [ [ "airports.createOrReplaceTempView(\"airports\")", "_____no_output_____" ], [ "# Get the first 10 rows of flights\nquery = \"FROM airports SELECT * LIMIT 10\"\n\nairports10 = spark.sql(query)\n\n# Show the results\nairports10.show()", "+---------+--------------------+-------------+-----+-------+--------+----------+\n|IATA_CODE| AIRPORT| CITY|STATE|COUNTRY|LATITUDE| LONGITUDE|\n+---------+--------------------+-------------+-----+-------+--------+----------+\n| ABE|Lehigh Valley Int...| Allentown| PA| USA|40.65236| -75.44040|\n| ABI|Abilene Regional ...| Abilene| TX| USA|32.41132| -99.68190|\n| ABQ|Albuquerque Inter...| Albuquerque| NM| USA|35.04022|-106.60919|\n| ABR|Aberdeen Regional...| Aberdeen| SD| USA|45.44906| -98.42183|\n| ABY|Southwest Georgia...| Albany| GA| USA|31.53552| -84.19447|\n| ACK|Nantucket Memoria...| Nantucket| MA| USA|41.25305| -70.06018|\n| ACT|Waco Regional Air...| Waco| TX| USA|31.61129| -97.23052|\n| ACV| Arcata Airport|Arcata/Eureka| CA| USA|40.97812|-124.10862|\n| ACY|Atlantic City Int...|Atlantic City| NJ| USA|39.45758| -74.57717|\n| ADK| Adak Airport| Adak| AK| USA|51.87796|-176.64603|\n+---------+--------------------+-------------+-----+-------+--------+----------+\n\n" ] ], [ [ "### More complex examples \n\nRead data from CSV file:\n * `inferSchema` - to detect which columns are numbers (not strigs!) - useful e.g. for sorting.\n * `header` - to read the firs line as column names", "_____no_output_____" ] ], [ [ "countries = spark.read.csv(\"countries of the world.csv\",inferSchema=True,header=True)", "_____no_output_____" ], [ "countries.toPandas()", "_____no_output_____" ] ], [ [ "We can inspect the schema of the DataFrame.", "_____no_output_____" ] ], [ [ "countries.printSchema()", "root\n |-- Country: string (nullable = true)\n |-- Region: string (nullable = true)\n |-- Population: integer (nullable = true)\n |-- Area (sq. mi.): integer (nullable = true)\n |-- Pop. Density (per sq. 
mi.): string (nullable = true)\n |-- Coastline (coast/area ratio): string (nullable = true)\n |-- Net migration: string (nullable = true)\n |-- Infant mortality (per 1000 births): string (nullable = true)\n |-- GDP ($ per capita): integer (nullable = true)\n |-- Literacy (%): string (nullable = true)\n |-- Phones (per 1000): string (nullable = true)\n |-- Arable (%): string (nullable = true)\n |-- Crops (%): string (nullable = true)\n |-- Other (%): string (nullable = true)\n |-- Climate: string (nullable = true)\n |-- Birthrate: string (nullable = true)\n |-- Deathrate: string (nullable = true)\n |-- Agriculture: string (nullable = true)\n |-- Industry: string (nullable = true)\n |-- Service: string (nullable = true)\n\n" ] ], [ [ "### Examples of SQL Queries", "_____no_output_____" ] ], [ [ "countries.createOrReplaceTempView(\"countries\")", "_____no_output_____" ], [ "spark.sql(\"SELECT * FROM countries WHERE Region LIKE '%OCEANIA%'\").toPandas()", "_____no_output_____" ] ], [ [ "### Queries using PySpark DSL\n\nDSL = Domain Specific Language - API similar to natural or other language, implemented as library in another language.", "_____no_output_____" ], [ "List all the countries with the population > 38 million", "_____no_output_____" ] ], [ [ "countries.filter((col(\"Population\") > 38000000)).orderBy(\"Population\").toPandas()", "_____no_output_____" ] ], [ [ "Select all the countries from Europe", "_____no_output_____" ] ], [ [ "countries.select(\"Country\", \"Population\").where(col(\"Region\").like(\"%EUROPE%\")).show()", "+--------------------+----------+\n| Country|Population|\n+--------------------+----------+\n| Albania | 3581655|\n| Andorra | 71201|\n| Austria | 8192880|\n| Belgium | 10379067|\n|Bosnia & Herzegov...| 4498976|\n| Bulgaria | 7385367|\n| Croatia | 4494749|\n| Czech Republic | 10235455|\n| Denmark | 5450661|\n| Faroe Islands | 47246|\n| Finland | 5231372|\n| France | 60876136|\n| Germany | 82422299|\n| Gibraltar | 27928|\n| Greece | 10688058|\n| Guernsey | 65409|\n| Hungary | 9981334|\n| Iceland | 299388|\n| Ireland | 4062235|\n| Isle of Man | 75441|\n+--------------------+----------+\nonly showing top 20 rows\n\n" ] ], [ [ "Conditions in `where` clause can contain logical expressions.", "_____no_output_____" ] ], [ [ "countries.select(\"Country\", \"Population\")\\\n.where((col(\"Region\").like(\"%EUROPE%\")) & (col(\"Population\")> 10000000)).show()", "+---------------+----------+\n| Country|Population|\n+---------------+----------+\n| Belgium | 10379067|\n|Czech Republic | 10235455|\n| France | 60876136|\n| Germany | 82422299|\n| Greece | 10688058|\n| Italy | 58133509|\n| Netherlands | 16491461|\n| Poland | 38536869|\n| Portugal | 10605870|\n| Romania | 22303552|\n| Spain | 40397842|\n|United Kingdom | 60609153|\n+---------------+----------+\n\n" ] ], [ [ "### Aggregation\n\nWe can run aggregations with predefined functions (faster!):", "_____no_output_____" ] ], [ [ "from pyspark.sql.functions import sum", "_____no_output_____" ], [ "pd_countries = countries.select(\"Region\", \"Population\").groupBy(\"Region\").agg(sum(\"Population\")).toPandas()", "_____no_output_____" ], [ "pd_countries", "_____no_output_____" ] ], [ [ "We can make the column name look better, by using `alias`:", "_____no_output_____" ] ], [ [ "pd_countries = countries.select(\"Region\", \"Population\").groupBy(\"Region\").agg(sum(\"Population\").alias('Total')).toPandas()", "_____no_output_____" ], [ "pd_countries", "_____no_output_____" ] ], [ [ "### Plot examples \nPandas DataFrames 
are useful for plotting using MatPlotLib:", "_____no_output_____" ] ], [ [ "pd_countries.plot(x='Region', y='Total',kind='bar', figsize=(10, 6))", "_____no_output_____" ] ], [ [ "## User defined functions for data manipulation\nOur `countries` DataFrame has some problems:\n * missing values\n * some numbers use comma instead of point as floating point separator (e.g. Literacy = 99,4)\n \nWe can clean the data using User Defined Functions (UDF)", "_____no_output_____" ] ], [ [ "from pyspark.sql.types import FloatType\nfrom pyspark.sql.functions import udf", "_____no_output_____" ] ], [ [ "Define a Python function which coverts numbers with commas to `float`", "_____no_output_____" ] ], [ [ "def to_float (s) :\n return float(s.replace(',','.'))", "_____no_output_____" ] ], [ [ "Test that it works:", "_____no_output_____" ] ], [ [ "to_float('0,99')", "_____no_output_____" ] ], [ [ "Now define a Spark UDF:", "_____no_output_____" ] ], [ [ "float_udf = udf(to_float , FloatType())", "_____no_output_____" ] ], [ [ "Test it on a Data Frame", "_____no_output_____" ] ], [ [ "countries.withColumn(\"Literacy\", float_udf(\"Literacy (%)\"))", "_____no_output_____" ] ], [ [ "OK, we can see that the `Literacy` is now `float`", "_____no_output_____" ] ], [ [ "countries.show(50)", "+--------------------+--------------------+----------+--------------+--------------------------+----------------------------+-------------+----------------------------------+------------------+------------+-----------------+----------+---------+---------+-------+---------+---------+-----------+--------+-------+\n| Country| Region|Population|Area (sq. mi.)|Pop. Density (per sq. mi.)|Coastline (coast/area ratio)|Net migration|Infant mortality (per 1000 births)|GDP ($ per capita)|Literacy (%)|Phones (per 1000)|Arable (%)|Crops (%)|Other (%)|Climate|Birthrate|Deathrate|Agriculture|Industry|Service|\n+--------------------+--------------------+----------+--------------+--------------------------+----------------------------+-------------+----------------------------------+------------------+------------+-----------------+----------+---------+---------+-------+---------+---------+-----------+--------+-------+\n| Afghanistan |ASIA (EX. NEAR EA...| 31056997| 647500| 48,0| 0,00| 23,06| 163,07| 700| 36,0| 3,2| 12,13| 0,22| 87,65| 1| 46,6| 20,34| 0,38| 0,24| 0,38|\n| Albania |EASTERN EUROPE ...| 3581655| 28748| 124,6| 1,26| -4,93| 21,52| 4500| 86,5| 71,2| 21,09| 4,42| 74,49| 3| 15,11| 5,22| 0,232| 0,188| 0,579|\n| Algeria |NORTHERN AFRICA ...| 32930091| 2381740| 13,8| 0,04| -0,39| 31| 6000| 70,0| 78,1| 3,22| 0,25| 96,53| 1| 17,14| 4,61| 0,101| 0,6| 0,298|\n| American Samoa |OCEANIA ...| 57794| 199| 290,4| 58,29| -20,71| 9,27| 8000| 97,0| 259,5| 10| 15| 75| 2| 22,46| 3,27| null| null| null|\n| Andorra |WESTERN EUROPE ...| 71201| 468| 152,1| 0,00| 6,6| 4,05| 19000| 100,0| 497,2| 2,22| 0| 97,78| 3| 8,71| 6,25| null| null| null|\n| Angola |SUB-SAHARAN AFRIC...| 12127071| 1246700| 9,7| 0,13| 0| 191,19| 1900| 42,0| 7,8| 2,41| 0,24| 97,35| null| 45,11| 24,2| 0,096| 0,658| 0,246|\n| Anguilla |LATIN AMER. & CAR...| 13477| 102| 132,1| 59,80| 10,76| 21,03| 8600| 95,0| 460,0| 0| 0| 100| 2| 14,17| 5,34| 0,04| 0,18| 0,78|\n| Antigua & Barbuda |LATIN AMER. & CAR...| 69108| 443| 156,0| 34,54| -6,15| 19,46| 11000| 89,0| 549,9| 18,18| 4,55| 77,27| 2| 16,93| 5,37| 0,038| 0,22| 0,743|\n| Argentina |LATIN AMER. & CAR...| 39921833| 2766890| 14,4| 0,18| 0,61| 15,18| 11200| 97,1| 220,4| 12,31| 0,48| 87,21| 3| 16,73| 7,55| 0,095| 0,358| 0,547|\n| Armenia |C.W. 
OF IND. STATES | 2976372| 29800| 99,9| 0,00| -6,47| 23,28| 3500| 98,6| 195,7| 17,55| 2,3| 80,15| 4| 12,07| 8,23| 0,239| 0,343| 0,418|\n| Aruba |LATIN AMER. & CAR...| 71891| 193| 372,5| 35,49| 0| 5,89| 28000| 97,0| 516,1| 10,53| 0| 89,47| 2| 11,03| 6,68| 0,004| 0,333| 0,663|\n| Australia |OCEANIA ...| 20264082| 7686850| 2,6| 0,34| 3,98| 4,69| 29000| 100,0| 565,5| 6,55| 0,04| 93,41| 1| 12,14| 7,51| 0,038| 0,262| 0,7|\n| Austria |WESTERN EUROPE ...| 8192880| 83870| 97,7| 0,00| 2| 4,66| 30000| 98,0| 452,2| 16,91| 0,86| 82,23| 3| 8,74| 9,76| 0,018| 0,304| 0,678|\n| Azerbaijan |C.W. OF IND. STATES | 7961619| 86600| 91,9| 0,00| -4,9| 81,74| 3400| 97,0| 137,1| 19,63| 2,71| 77,66| 1| 20,74| 9,75| 0,141| 0,457| 0,402|\n| Bahamas, The |LATIN AMER. & CAR...| 303770| 13940| 21,8| 25,41| -2,2| 25,21| 16700| 95,6| 460,6| 0,8| 0,4| 98,8| 2| 17,57| 9,05| 0,03| 0,07| 0,9|\n| Bahrain |NEAR EAST ...| 698585| 665| 1050,5| 24,21| 1,05| 17,27| 16900| 89,1| 281,3| 2,82| 5,63| 91,55| 1| 17,8| 4,14| 0,005| 0,387| 0,608|\n| Bangladesh |ASIA (EX. NEAR EA...| 147365352| 144000| 1023,4| 0,40| -0,71| 62,6| 1900| 43,1| 7,3| 62,11| 3,07| 34,82| 2| 29,8| 8,27| 0,199| 0,198| 0,603|\n| Barbados |LATIN AMER. & CAR...| 279912| 431| 649,5| 22,51| -0,31| 12,5| 15700| 97,4| 481,9| 37,21| 2,33| 60,46| 2| 12,71| 8,67| 0,06| 0,16| 0,78|\n| Belarus |C.W. OF IND. STATES | 10293011| 207600| 49,6| 0,00| 2,54| 13,37| 6100| 99,6| 319,1| 29,55| 0,6| 69,85| 4| 11,16| 14,02| 0,093| 0,316| 0,591|\n| Belgium |WESTERN EUROPE ...| 10379067| 30528| 340,0| 0,22| 1,23| 4,68| 29100| 98,0| 462,6| 23,28| 0,4| 76,32| 3| 10,38| 10,27| 0,01| 0,24| 0,749|\n| Belize |LATIN AMER. & CAR...| 287730| 22966| 12,5| 1,68| 0| 25,69| 4900| 94,1| 115,7| 2,85| 1,71| 95,44| 2| 28,84| 5,72| 0,142| 0,152| 0,612|\n| Benin |SUB-SAHARAN AFRIC...| 7862944| 112620| 69,8| 0,11| 0| 85| 1100| 40,9| 9,7| 18,08| 2,4| 79,52| 2| 38,85| 12,22| 0,316| 0,138| 0,546|\n| Bermuda |NORTHERN AMERICA ...| 65773| 53| 1241,0| 194,34| 2,49| 8,53| 36000| 98,0| 851,4| 20| 0| 80| 2| 11,4| 7,74| 0,01| 0,1| 0,89|\n| Bhutan |ASIA (EX. NEAR EA...| 2279723| 47000| 48,5| 0,00| 0| 100,44| 1300| 42,2| 14,3| 3,09| 0,43| 96,48| 2| 33,65| 12,7| 0,258| 0,379| 0,363|\n| Bolivia |LATIN AMER. & CAR...| 8989046| 1098580| 8,2| 0,00| -1,32| 53,11| 2400| 87,2| 71,9| 2,67| 0,19| 97,14| 1,5| 23,3| 7,53| 0,128| 0,352| 0,52|\n|Bosnia & Herzegov...|EASTERN EUROPE ...| 4498976| 51129| 88,0| 0,04| 0,31| 21,05| 6100| null| 215,4| 13,6| 2,96| 83,44| 4| 8,77| 8,27| 0,142| 0,308| 0,55|\n| Botswana |SUB-SAHARAN AFRIC...| 1639833| 600370| 2,7| 0,00| 0| 54,58| 9000| 79,8| 80,5| 0,65| 0,01| 99,34| 1| 23,08| 29,5| 0,024| 0,469| 0,507|\n| Brazil |LATIN AMER. & CAR...| 188078227| 8511965| 22,1| 0,09| -0,03| 29,61| 7600| 86,4| 225,3| 6,96| 0,9| 92,15| 2| 16,56| 6,17| 0,084| 0,4| 0,516|\n| British Virgin Is. |LATIN AMER. & CAR...| 23098| 153| 151,0| 52,29| 10,01| 18,05| 16000| 97,8| 506,5| 20| 6,67| 73,33| 2| 14,89| 4,42| 0,018| 0,062| 0,92|\n| Brunei |ASIA (EX. NEAR EA...| 379444| 5770| 65,8| 2,79| 3,59| 12,61| 18600| 93,9| 237,2| 0,57| 0,76| 98,67| 2| 18,79| 3,45| 0,036| 0,561| 0,403|\n| Bulgaria |EASTERN EUROPE ...| 7385367| 110910| 66,6| 0,32| -4,58| 20,55| 7600| 98,6| 336,3| 40,02| 1,92| 58,06| 3| 9,65| 14,27| 0,093| 0,304| 0,603|\n| Burkina Faso |SUB-SAHARAN AFRIC...| 13902972| 274200| 50,7| 0,00| 0| 97,57| 1100| 26,6| 7,0| 14,43| 0,19| 85,38| 2| 45,62| 15,6| 0,322| 0,196| 0,482|\n| Burma |ASIA (EX. 
NEAR EA...| 47382633| 678500| 69,8| 0,28| -1,8| 67,24| 1800| 85,3| 10,1| 15,19| 0,97| 83,84| 2| 17,91| 9,83| 0,564| 0,082| 0,353|\n| Burundi |SUB-SAHARAN AFRIC...| 8090068| 27830| 290,7| 0,00| -0,06| 69,29| 600| 51,6| 3,4| 35,05| 14,02| 50,93| 2| 42,22| 13,46| 0,463| 0,203| 0,334|\n| Cambodia |ASIA (EX. NEAR EA...| 13881427| 181040| 76,7| 0,24| 0| 71,48| 1900| 69,4| 2,6| 20,96| 0,61| 78,43| 2| 26,9| 9,06| 0,35| 0,3| 0,35|\n| Cameroon |SUB-SAHARAN AFRIC...| 17340702| 475440| 36,5| 0,08| 0| 68,26| 1800| 79,0| 5,7| 12,81| 2,58| 84,61| 1,5| 33,89| 13,47| 0,448| 0,17| 0,382|\n| Canada |NORTHERN AMERICA ...| 33098932| 9984670| 3,3| 2,02| 5,96| 4,75| 29800| 97,0| 552,2| 4,96| 0,02| 95,02| null| 10,78| 7,8| 0,022| 0,294| 0,684|\n| Cape Verde |SUB-SAHARAN AFRIC...| 420979| 4033| 104,4| 23,93| -12,07| 47,77| 1400| 76,6| 169,6| 9,68| 0,5| 89,82| 3| 24,87| 6,55| 0,121| 0,219| 0,66|\n| Cayman Islands |LATIN AMER. & CAR...| 45436| 262| 173,4| 61,07| 18,75| 8,19| 35000| 98,0| 836,3| 3,85| 0| 96,15| 2| 12,74| 4,89| 0,014| 0,032| 0,954|\n|Central African R...|SUB-SAHARAN AFRIC...| 4303356| 622984| 6,9| 0,00| 0| 91| 1100| 51,0| 2,3| 3,1| 0,14| 96,76| 2| 33,91| 18,65| 0,55| 0,2| 0,25|\n| Chad |SUB-SAHARAN AFRIC...| 9944201| 1284000| 7,7| 0,00| -0,11| 93,82| 1200| 47,5| 1,3| 2,86| 0,02| 97,12| 2| 45,73| 16,38| 0,335| 0,259| 0,406|\n| Chile |LATIN AMER. & CAR...| 16134219| 756950| 21,3| 0,85| 0| 8,8| 9900| 96,2| 213,0| 2,65| 0,42| 96,93| 3| 15,23| 5,81| 0,06| 0,493| 0,447|\n| China |ASIA (EX. NEAR EA...|1313973713| 9596960| 136,9| 0,15| -0,4| 24,18| 5000| 90,9| 266,7| 15,4| 1,25| 83,35| 1,5| 13,25| 6,97| 0,125| 0,473| 0,403|\n| Colombia |LATIN AMER. & CAR...| 43593035| 1138910| 38,3| 0,28| -0,31| 20,97| 6300| 92,5| 176,2| 2,42| 1,67| 95,91| 2| 20,48| 5,58| 0,125| 0,342| 0,533|\n| Comoros |SUB-SAHARAN AFRIC...| 690948| 2170| 318,4| 15,67| 0| 74,93| 700| 56,5| 24,5| 35,87| 23,32| 40,81| 2| 36,93| 8,2| 0,4| 0,04| 0,56|\n| Congo, Dem. Rep. |SUB-SAHARAN AFRIC...| 62660551| 2345410| 26,7| 0,00| 0| 94,69| 700| 65,5| 0,2| 2,96| 0,52| 96,52| 2| 43,69| 13,27| 0,55| 0,11| 0,34|\n|Congo, Repub. of ...|SUB-SAHARAN AFRIC...| 3702314| 342000| 10,8| 0,05| -0,17| 93,86| 700| 83,8| 3,7| 0,51| 0,13| 99,36| 2| 42,57| 12,93| 0,062| 0,57| 0,369|\n| Cook Islands |OCEANIA ...| 21388| 240| 89,1| 50,00| null| null| 5000| 95,0| 289,9| 17,39| 13,04| 69,57| 2| 21| null| 0,151| 0,096| 0,753|\n| Costa Rica |LATIN AMER. & CAR...| 4075261| 51100| 79,8| 2,52| 0,51| 9,95| 9100| 96,0| 340,7| 4,41| 5,88| 89,71| 2| 18,32| 4,36| 0,088| 0,299| 0,614|\n| Cote d'Ivoire |SUB-SAHARAN AFRIC...| 17654843| 322460| 54,8| 0,16| -0,07| 90,83| 1400| 50,9| 14,6| 9,75| 13,84| 76,41| 2| 35,11| 14,84| 0,279| 0,171| 0,55|\n+--------------------+--------------------+----------+--------------+--------------------------+----------------------------+-------------+----------------------------------+------------------+------------+-----------------+----------+---------+---------+-------+---------+---------+-----------+--------+-------+\nonly showing top 50 rows\n\n" ], [ "countries.where((col(\"Literacy\") < 50) & (col(\"GDP ($ per capita)\") > 700)).show()", "_____no_output_____" ] ], [ [ "Oops, what does it mean???\n\n- some rows have empty values!\n\nBefore we can use the table, we need to remove empty rows. 
Otherwise our UDF will fail.", "_____no_output_____" ] ], [ [ "full_countries = countries.select('Country', 'Population', 'Literacy (%)', 'GDP ($ per capita)').na.drop()", "_____no_output_____" ] ], [ [ "We can now apply the new UDF to the Data Frame:", "_____no_output_____" ] ], [ [ "full_countries = full_countries.withColumn(\"Literacy\", float_udf(\"Literacy (%)\"))", "_____no_output_____" ], [ "full_countries.show(50)", "+--------------------+----------+------------+------------------+--------+\n| Country|Population|Literacy (%)|GDP ($ per capita)|Literacy|\n+--------------------+----------+------------+------------------+--------+\n| Afghanistan | 31056997| 36,0| 700| 36.0|\n| Albania | 3581655| 86,5| 4500| 86.5|\n| Algeria | 32930091| 70,0| 6000| 70.0|\n| American Samoa | 57794| 97,0| 8000| 97.0|\n| Andorra | 71201| 100,0| 19000| 100.0|\n| Angola | 12127071| 42,0| 1900| 42.0|\n| Anguilla | 13477| 95,0| 8600| 95.0|\n| Antigua & Barbuda | 69108| 89,0| 11000| 89.0|\n| Argentina | 39921833| 97,1| 11200| 97.1|\n| Armenia | 2976372| 98,6| 3500| 98.6|\n| Aruba | 71891| 97,0| 28000| 97.0|\n| Australia | 20264082| 100,0| 29000| 100.0|\n| Austria | 8192880| 98,0| 30000| 98.0|\n| Azerbaijan | 7961619| 97,0| 3400| 97.0|\n| Bahamas, The | 303770| 95,6| 16700| 95.6|\n| Bahrain | 698585| 89,1| 16900| 89.1|\n| Bangladesh | 147365352| 43,1| 1900| 43.1|\n| Barbados | 279912| 97,4| 15700| 97.4|\n| Belarus | 10293011| 99,6| 6100| 99.6|\n| Belgium | 10379067| 98,0| 29100| 98.0|\n| Belize | 287730| 94,1| 4900| 94.1|\n| Benin | 7862944| 40,9| 1100| 40.9|\n| Bermuda | 65773| 98,0| 36000| 98.0|\n| Bhutan | 2279723| 42,2| 1300| 42.2|\n| Bolivia | 8989046| 87,2| 2400| 87.2|\n| Botswana | 1639833| 79,8| 9000| 79.8|\n| Brazil | 188078227| 86,4| 7600| 86.4|\n| British Virgin Is. | 23098| 97,8| 16000| 97.8|\n| Brunei | 379444| 93,9| 18600| 93.9|\n| Bulgaria | 7385367| 98,6| 7600| 98.6|\n| Burkina Faso | 13902972| 26,6| 1100| 26.6|\n| Burma | 47382633| 85,3| 1800| 85.3|\n| Burundi | 8090068| 51,6| 600| 51.6|\n| Cambodia | 13881427| 69,4| 1900| 69.4|\n| Cameroon | 17340702| 79,0| 1800| 79.0|\n| Canada | 33098932| 97,0| 29800| 97.0|\n| Cape Verde | 420979| 76,6| 1400| 76.6|\n| Cayman Islands | 45436| 98,0| 35000| 98.0|\n|Central African R...| 4303356| 51,0| 1100| 51.0|\n| Chad | 9944201| 47,5| 1200| 47.5|\n| Chile | 16134219| 96,2| 9900| 96.2|\n| China |1313973713| 90,9| 5000| 90.9|\n| Colombia | 43593035| 92,5| 6300| 92.5|\n| Comoros | 690948| 56,5| 700| 56.5|\n| Congo, Dem. Rep. | 62660551| 65,5| 700| 65.5|\n|Congo, Repub. 
of ...| 3702314| 83,8| 700| 83.8|\n| Cook Islands | 21388| 95,0| 5000| 95.0|\n| Costa Rica | 4075261| 96,0| 9100| 96.0|\n| Cote d'Ivoire | 17654843| 50,9| 1400| 50.9|\n| Croatia | 4494749| 98,5| 10600| 98.5|\n+--------------------+----------+------------+------------------+--------+\nonly showing top 50 rows\n\n" ], [ "full_countries.where((col(\"Literacy\") < 50) & (col(\"GDP ($ per capita)\") > 700)).show()", "+--------------+----------+------------+------------------+--------+\n| Country|Population|Literacy (%)|GDP ($ per capita)|Literacy|\n+--------------+----------+------------+------------------+--------+\n| Angola | 12127071| 42,0| 1900| 42.0|\n| Bangladesh | 147365352| 43,1| 1900| 43.1|\n| Benin | 7862944| 40,9| 1100| 40.9|\n| Bhutan | 2279723| 42,2| 1300| 42.2|\n| Burkina Faso | 13902972| 26,6| 1100| 26.6|\n| Chad | 9944201| 47,5| 1200| 47.5|\n| Gambia, The | 1641564| 40,1| 1700| 40.1|\n| Guinea | 9690222| 35,9| 2100| 35.9|\n|Guinea-Bissau | 1442029| 42,4| 800| 42.4|\n| Iraq | 26783383| 40,4| 1500| 40.4|\n| Mali | 11716829| 46,4| 900| 46.4|\n| Mauritania | 3177388| 41,7| 1800| 41.7|\n| Mozambique | 19686505| 47,8| 1200| 47.8|\n| Nepal | 28287147| 45,2| 1400| 45.2|\n| Niger | 12525094| 17,6| 800| 17.6|\n| Pakistan | 165803560| 45,7| 2100| 45.7|\n| Senegal | 11987121| 40,2| 1600| 40.2|\n+--------------+----------+------------+------------------+--------+\n\n" ], [ "full_countries.toPandas().plot(x=\"Literacy\",y=\"GDP ($ per capita)\",kind=\"scatter\",figsize=(10, 6))", "_____no_output_____" ] ], [ [ "# Useful information\n\n * https://spark.apache.org/docs/latest/quick-start.html\n * https://spark.apache.org/docs/latest/sql-programming-guide.html\n * https://pandas.pydata.org/pandas-docs/stable/visualization.html", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
c52cb35f1209db5f7b2f4b5db28b106475c2e16f
4,357
ipynb
Jupyter Notebook
inference/explore_heatmaps.ipynb
TropComplique/ExtremeNet
b2caa642cedf419104f1945ada445954885e3371
[ "MIT" ]
null
null
null
inference/explore_heatmaps.ipynb
TropComplique/ExtremeNet
b2caa642cedf419104f1945ada445954885e3371
[ "MIT" ]
null
null
null
inference/explore_heatmaps.ipynb
TropComplique/ExtremeNet
b2caa642cedf419104f1945ada445954885e3371
[ "MIT" ]
1
2021-05-24T11:06:18.000Z
2021-05-24T11:06:18.000Z
23.05291
96
0.510672
[ [ [ "import sys\nimport torch\nfrom PIL import Image\nimport numpy as np\n\nsys.path.append('../')\nfrom detector.architecture import Architecture", "_____no_output_____" ] ], [ [ "# Load a trained model", "_____no_output_____" ] ], [ [ "model = Architecture(num_outputs=5 + 10)\nmodel.eval()\nmodel.load_state_dict(torch.load('../models/run00.pth', map_location=torch.device('cpu')))", "_____no_output_____" ] ], [ [ "# Get an image", "_____no_output_____" ] ], [ [ "image = Image.open('/home/dan/datasets/COCO/images/val2017/000000000885.jpg')\nprint(image.size)\nimage = image.resize((640, 448))\nimage", "_____no_output_____" ] ], [ [ "# Predict", "_____no_output_____" ] ], [ [ "image_tensor = torch.FloatTensor(np.array(image)/255.0)\nimage_tensor = image_tensor.unsqueeze(0).permute(0, 3, 1, 2)\n\nwith torch.no_grad():\n x, features = model(image_tensor)\n \nheatmaps, offsets = torch.split(x, [5, 10], dim=1)\nheatmaps = torch.sigmoid(heatmaps)[0]", "_____no_output_____" ] ], [ [ "# Show masks", "_____no_output_____" ] ], [ [ "def show_mask(image, mask):\n\n red = np.array([255, 0, 0], dtype='uint8')\n gray_mask = mask.numpy().astype('uint8')\n color_mask = red * np.expand_dims(gray_mask, 2)\n\n gray_mask = Image.fromarray(100 * gray_mask)\n color_mask = Image.fromarray(color_mask)\n color_mask.putalpha(gray_mask)\n \n image_copy = image.copy()\n image_copy.putalpha(255)\n width, height = image.size\n image_copy.alpha_composite(color_mask.resize((width, height)))\n return image_copy", "_____no_output_____" ], [ "mask = features['p2'][0, 0]\nmask = (mask - mask.min())/(mask.max() - mask.min())", "_____no_output_____" ], [ "show_mask(image, mask > 0.5)", "_____no_output_____" ] ], [ [ "# Show heatmaps", "_____no_output_____" ] ], [ [ "image_copy = image.copy()\nimage_copy.putalpha(255)\nwidth, height = image.size\n\ncolors = {\n 0: [255, 0, 0], # red - top\n 1: [0, 0, 255], # blue - bottom\n 2: [255, 255, 0], # yellow - left\n 3: [255, 0, 255], # pink - right\n 4: [0, 255, 0] # green - center\n}\n\nfor i, color in colors.items():\n\n h = np.expand_dims(heatmaps[i].numpy() > 0.1, 2)\n gray_h = Image.fromarray(255*h[:, :, 0].astype('uint8'))\n color_h = Image.fromarray((color * h).astype('uint8'))\n \n color_h = color_h.resize((width, height))\n color_h.putalpha(gray_h.resize((width, height)))\n \n image_copy.alpha_composite(color_h)\n \nimage_copy", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
c52cb881af6f12768048f433cffc03af5f12a640
261,361
ipynb
Jupyter Notebook
boston_housing/boston_housing.ipynb
fierceX/Udacity_Report
5eb9b179a3adda3dfaea598ab839a2ea6c164be4
[ "MIT" ]
2
2019-02-13T01:32:48.000Z
2019-02-18T17:25:05.000Z
boston_housing/boston_housing.ipynb
fierceX/Udacity_Report
5eb9b179a3adda3dfaea598ab839a2ea6c164be4
[ "MIT" ]
null
null
null
boston_housing/boston_housing.ipynb
fierceX/Udacity_Report
5eb9b179a3adda3dfaea598ab839a2ea6c164be4
[ "MIT" ]
null
null
null
299.725917
126,178
0.916812
[ [ [ "# 机器学习工程师纳米学位\n## 模型评价与验证\n## 项目 1: 预测波士顿房价\n\n\n欢迎来到机器学习工程师纳米学位的第一个项目!在此文件中,有些示例代码已经提供给你,但你还需要实现更多的功能来让项目成功运行。除非有明确要求,你无须修改任何已给出的代码。以**'练习'**开始的标题表示接下来的内容中有需要你必须实现的功能。每一部分都会有详细的指导,需要实现的部分也会在注释中以**'TODO'**标出。请仔细阅读所有的提示!\n\n除了实现代码外,你还**必须**回答一些与项目和实现有关的问题。每一个需要你回答的问题都会以**'问题 X'**为标题。请仔细阅读每个问题,并且在问题后的**'回答'**文字框中写出完整的答案。你的项目将会根据你对问题的回答和撰写代码所实现的功能来进行评分。\n\n>**提示:**Code 和 Markdown 区域可通过 **Shift + Enter** 快捷键运行。此外,Markdown可以通过双击进入编辑模式。", "_____no_output_____" ], [ "## 开始\n在这个项目中,你将利用马萨诸塞州波士顿郊区的房屋信息数据训练和测试一个模型,并对模型的性能和预测能力进行测试。通过该数据训练后的好的模型可以被用来对房屋做特定预测---尤其是对房屋的价值。对于房地产经纪等人的日常工作来说,这样的预测模型被证明非常有价值。\n\n此项目的数据集来自[UCI机器学习知识库](https://archive.ics.uci.edu/ml/datasets/Housing)。波士顿房屋这些数据于1978年开始统计,共506个数据点,涵盖了麻省波士顿不同郊区房屋14种特征的信息。本项目对原始数据集做了以下处理:\n- 有16个`'MEDV'` 值为50.0的数据点被移除。 这很可能是由于这些数据点包含**遗失**或**看不到的值**。\n- 有1个数据点的 `'RM'` 值为8.78. 这是一个异常值,已经被移除。\n- 对于本项目,房屋的`'RM'`, `'LSTAT'`,`'PTRATIO'`以及`'MEDV'`特征是必要的,其余不相关特征已经被移除。\n- `'MEDV'`特征的值已经过必要的数学转换,可以反映35年来市场的通货膨胀效应。\n\n运行下面区域的代码以载入波士顿房屋数据集,以及一些此项目所需的Python库。如果成功返回数据集的大小,表示数据集已载入成功。", "_____no_output_____" ] ], [ [ "# Import libraries necessary for this project\n# 载入此项目所需要的库\nimport numpy as np\nimport pandas as pd\nimport visuals as vs # Supplementary code\nfrom sklearn.model_selection import ShuffleSplit\n\n# Pretty display for notebooks\n# 让结果在notebook中显示\n%matplotlib inline\n\n# Load the Boston housing dataset\n# 载入波士顿房屋的数据集\ndata = pd.read_csv('housing.csv')\nprices = data['MEDV']\nfeatures = data.drop('MEDV', axis = 1)\n \n# Success\n# 完成\nprint \"Boston housing dataset has {} data points with {} variables each.\".format(*data.shape)", "Boston housing dataset has 489 data points with 4 variables each.\n" ] ], [ [ "## 分析数据\n在项目的第一个部分,你会对波士顿房地产数据进行初步的观察并给出你的分析。通过对数据的探索来熟悉数据可以让你更好地理解和解释你的结果。\n\n由于这个项目的最终目标是建立一个预测房屋价值的模型,我们需要将数据集分为**特征(features)**和**目标变量(target variable)**。**特征** `'RM'`, `'LSTAT'`,和 `'PTRATIO'`,给我们提供了每个数据点的数量相关的信息。**目标变量**:` 'MEDV'`,是我们希望预测的变量。他们分别被存在`features`和`prices`两个变量名中。", "_____no_output_____" ], [ "## 练习:基础统计运算\n你的第一个编程练习是计算有关波士顿房价的描述统计数据。我们已为你导入了` numpy `,你需要使用这个库来执行必要的计算。这些统计数据对于分析模型的预测结果非常重要的。\n在下面的代码中,你要做的是:\n- 计算`prices`中的`'MEDV'`的最小值、最大值、均值、中值和标准差;\n- 将运算结果储存在相应的变量中。", "_____no_output_____" ] ], [ [ "# TODO: Minimum price of the data\n#目标:计算价值的最小值\nminimum_price = np.min(data['MEDV'])\n\n# TODO: Maximum price of the data\n#目标:计算价值的最大值\nmaximum_price = np.max(data['MEDV'])\n\n# TODO: Mean price of the data\n#目标:计算价值的平均值\nmean_price = np.average(data['MEDV'])\n\n# TODO: Median price of the data\n#目标:计算价值的中值\nmedian_price = np.median(data['MEDV'])\n\n# TODO: Standard deviation of prices of the data\n#目标:计算价值的标准差\nstd_price = np.std(data['MEDV'])\n\n# Show the calculated statistics\n#目标:输出计算的结果\nprint \"Statistics for Boston housing dataset:\\n\"\nprint \"Minimum price: ${:,.2f}\".format(minimum_price)\nprint \"Maximum price: ${:,.2f}\".format(maximum_price)\nprint \"Mean price: ${:,.2f}\".format(mean_price)\nprint \"Median price ${:,.2f}\".format(median_price)\nprint \"Standard deviation of prices: ${:,.2f}\".format(std_price)", "Statistics for Boston housing dataset:\n\nMinimum price: $105,000.00\nMaximum price: $1,024,800.00\nMean price: $454,342.94\nMedian price $438,900.00\nStandard deviation of prices: $165,171.13\n" ] ], [ [ "### 问题1 - 特征观察\n\n如前文所述,本项目中我们关注的是其中三个值:`'RM'`、`'LSTAT'` 和`'PTRATIO'`,对每一个数据点:\n- `'RM'` 是该地区中每个房屋的平均房间数量;\n- `'LSTAT'` 是指该地区有多少百分比的房东属于是低收入阶层(有工作但收入微薄);\n- `'PTRATIO'` 
是该地区的中学和小学里,学生和老师的数目比(`学生/老师`)。\n\n_凭直觉,上述三个特征中对每一个来说,你认为增大该特征的数值,`'MEDV'`的值会是**增大**还是**减小**呢?每一个答案都需要你给出理由。_\n\n**提示:**你预期一个`'RM'` 值是6的房屋跟`'RM'` 值是7的房屋相比,价值更高还是更低呢?", "_____no_output_____" ], [ "**回答: ** \n- 'RM'增大'MEDV'会增大,因为某地区房间平均数量高的话,就意味着这个地方的房子比较好比较大,比如别墅式。而平均房间数量小的话可能是因为该地方比较贫穷,买不起较大的房子,房价自然就会低。\n- 'LSTAT' 增大'MEDV'会减小,如果一个地区的房东是高收入阶层,那么这个地区应该是个富人聚集地,房价也会高,反之房价会比较低。\n- 'PTRATIO'增大'MEDV'会减小,如果学生老师比较小的话,说明当地教育资源比较好,生活水平比较好,房价会高,反之说明教育资源比较少,房价比较低。", "_____no_output_____" ], [ "## 建模\n在项目的第二部分中,你需要了解必要的工具和技巧来让你的模型进行预测。用这些工具和技巧对每一个模型的表现做精确的衡量可以极大地增强你预测的信心。", "_____no_output_____" ], [ "### 练习:定义衡量标准\n如果不能对模型的训练和测试的表现进行量化地评估,我们就很难衡量模型的好坏。通常我们会定义一些衡量标准,这些标准可以通过对某些误差或者拟合程度的计算来得到。在这个项目中,你将通过运算[*决定系数*](http://stattrek.com/statistics/dictionary.aspx?definition=coefficient_of_determination) R<sup>2</sup> 来量化模型的表现。模型的决定系数是回归分析中十分常用的统计信息,经常被当作衡量模型预测能力好坏的标准。\n\nR<sup>2</sup>的数值范围从0至1,表示**目标变量**的预测值和实际值之间的相关程度平方的百分比。一个模型的R<sup>2</sup> 值为0还不如直接用**平均值**来预测效果好;而一个R<sup>2</sup> 值为1的模型则可以对目标变量进行完美的预测。从0至1之间的数值,则表示该模型中目标变量中有百分之多少能够用**特征**来解释。_模型也可能出现负值的R<sup>2</sup>,这种情况下模型所做预测有时会比直接计算目标变量的平均值差很多。_\n\n在下方代码的 `performance_metric` 函数中,你要实现:\n- 使用 `sklearn.metrics` 中的 `r2_score` 来计算 `y_true` 和 `y_predict`的R<sup>2</sup>值,作为对其表现的评判。\n- 将他们的表现评分储存到`score`变量中。", "_____no_output_____" ] ], [ [ "# TODO: Import 'r2_score'\nfrom sklearn.metrics import r2_score\ndef performance_metric(y_true, y_predict):\n \"\"\" Calculates and returns the performance score between \n true and predicted values based on the metric chosen. \"\"\"\n \n # TODO: Calculate the performance score between 'y_true' and 'y_predict'\n score = r2_score(y_true,y_predict)\n \n # Return the score\n return score", "_____no_output_____" ] ], [ [ "### 问题2 - 拟合程度\n\n假设一个数据集有五个数据且一个模型做出下列目标变量的预测:\n\n| 真实数值 | 预测数值 |\n| :-------------: | :--------: |\n| 3.0 | 2.5 |\n| -0.5 | 0.0 |\n| 2.0 | 2.1 |\n| 7.0 | 7.8 |\n| 4.2 | 5.3 |\n*你会觉得这个模型已成功地描述了目标变量的变化吗?如果成功,请解释为什么,如果没有,也请给出原因。* \n\n运行下方的代码,使用`performance_metric`函数来计算模型的决定系数。", "_____no_output_____" ] ], [ [ "# Calculate the performance of this model\nscore = performance_metric([3, -0.5, 2, 7, 4.2], [2.5, 0.0, 2.1, 7.8, 5.3])\nprint \"Model has a coefficient of determination, R^2, of {:.3f}.\".format(score)", "Model has a coefficient of determination, R^2, of 0.923.\n" ] ], [ [ "**回答:** 我认为这个模型能够成功地描述目标变量的变化,因为真实值和预测值都比较接近,R^2系数也比较高。", "_____no_output_____" ], [ "### 练习: 数据分割与重排\n接下来,你需要把波士顿房屋数据集分成训练和测试两个子集。通常在这个过程中,数据也会被重新排序,以消除数据集中由于排序而产生的偏差。\n在下面的代码中,你需要:\n- 使用 `sklearn.model_selection` 中的 `train_test_split`, 将`features`和`prices`的数据都分成用于训练的数据子集和用于测试的数据子集。\n - 分割比例为:80%的数据用于训练,20%用于测试;\n - 选定一个数值以设定 `train_test_split` 中的 `random_state` ,这会确保结果的一致性;\n- 最终分离出的子集为`X_train`,`X_test`,`y_train`,和`y_test`。", "_____no_output_____" ] ], [ [ "# TODO: Import 'train_test_split'\nfrom sklearn.model_selection import train_test_split\n\n# TODO: Shuffle and split the data into training and testing subsets\nX_train, X_test, y_train, y_test = train_test_split(features, prices, test_size=0.20, random_state=42)\n\n# Success\nprint \"Training and testing split was successful.\"", "Training and testing split was successful.\n" ] ], [ [ "### 问题 3- 训练及测试\n*将数据集按一定比例分为训练用的数据集和测试用的数据集对学习算法有什么好处?*\n\n**提示:** 如果没有数据来对模型进行测试,会出现什么问题?", "_____no_output_____" ], [ "**答案: ** 如果没有数据对模型进行测试,那么模型可能会存在过拟合的问题,也就是说,在训练集上,模型表现得很完美,而不在训练集上的数据,则表现的很差,过拟合是可能模型学习了训练集做特有的误差特征。分为训练用和测试用的数据集,可以检验学习算法对未知数据的性能。", "_____no_output_____" ], [ "----\n\n## 分析模型的表现\n在项目的第三部分,我们来看一下几个模型针对不同的数据集在学习和测试上的表现。另外,你需要专注于一个特定的算法,用全部训练集训练时,提高它的`'max_depth'` 
参数,观察这一参数的变化如何影响模型的表现。把你模型的表现画出来对于分析过程十分有益。可视化可以让我们看到一些单看结果看不到的行为。", "_____no_output_____" ], [ "### 学习曲线\n下方区域内的代码会输出四幅图像,它们是一个决策树模型在不同最大深度下的表现。每一条曲线都直观的显示了随着训练数据量的增加,模型学习曲线的训练评分和测试评分的变化。注意,曲线的阴影区域代表的是该曲线的不确定性(用标准差衡量)。这个模型的训练和测试部分都使用决定系数R<sup>2</sup>来评分。\n\n运行下方区域中的代码,并利用输出的图形回答下面的问题。", "_____no_output_____" ] ], [ [ "# Produce learning curves for varying training set sizes and maximum depths\nvs.ModelLearning(features, prices)", "_____no_output_____" ] ], [ [ "### 问题 4 - 学习数据\n*选择上述图像中的其中一个,并给出其最大深度。随着训练数据量的增加,训练曲线的评分有怎样的变化?测试曲线呢?如果有更多的训练数据,是否能有效提升模型的表现呢?*\n**提示:**学习曲线的评分是否最终会收敛到特定的值?", "_____no_output_____" ], [ "**答案: ** 第一幅图像,最大深度是1,训练曲线的评分从1.0开始逐渐下降,而测试曲线则从0.0开始逐渐上升,如果有更多的训练数据,训练模型也不会有更大的提升,因为深度有限,模型的表现能力趋于特定值。随着最大深度的增加,训练数据表现能力会逐渐提升,而测试曲线随着最大深度的增加,模型的能力会逐渐下降,也就是出现了过拟合,这就是第四幅图像出现的问题。", "_____no_output_____" ], [ "### 复杂度曲线\n下列代码内的区域会输出一幅图像,它展示了一个已经经过训练和验证的决策树模型在不同最大深度条件下的表现。这个图形将包含两条曲线,一个是训练的变化,一个是测试的变化。跟**学习曲线**相似,阴影区域代表该曲线的不确定性,模型训练和测试部分的评分都用的 `performance_metric` 函数。\n\n运行下方区域中的代码,并利用输出的图形并回答下面的两个问题。", "_____no_output_____" ] ], [ [ "vs.ModelComplexity(X_train, y_train)", "_____no_output_____" ] ], [ [ "### 问题 5- 偏差与方差之间的权衡取舍\n*当模型以最大深度 1训练时,模型的预测是出现很大的偏差还是出现了很大的方差?当模型以最大深度10训练时,情形又如何呢?图形中的哪些特征能够支持你的结论?*\n \n**提示:** 你如何得知模型是否出现了偏差很大或者方差很大的问题?", "_____no_output_____" ], [ "**答案: ** 当最大深度为1时,模型有很大的偏差,以为此时训练数据表现得分和测试表现得分比较接近而且很低,所以模型应出现了较大的偏差,而当最大深度为10时,训练得分较高,而测试得分与训练得分有较大的差距,模型应该出现了较大的方差。因为训练得分很高但和测试得分差距较大。", "_____no_output_____" ], [ "### 问题 6- 最优模型的猜测\n*你认为最大深度是多少的模型能够最好地对未见过的数据进行预测?你得出这个答案的依据是什么?*", "_____no_output_____" ], [ "**答案: ** 最大深度应该是4,因为在测试得分上,当最大深度为4时,模型的测试得分最高,此时模型的方差和偏差趋于平衡,如果深度再大,测会出现高方差,深度减小,则会出现高偏差。", "_____no_output_____" ], [ "-----\n\n## 评价模型表现\n在这个项目的最后,你将自己建立模型,并使用最优化的`fit_model`函数,基于客户房子的特征来预测该房屋的价值。", "_____no_output_____" ], [ "### 问题 7- 网格搜索(Grid Search)\n*什么是网格搜索法?如何用它来优化学习算法?*\n", "_____no_output_____" ], [ "**回答: ** 网格搜索法是使用不同的参数组合来训练模型,通过开发人员指定的相关参数组合,例如在决策树里,不同的参数组合主要是决策树的最大深度。最后通过遍历不同的参数组合,得出一个最优的参数组合和模型,可以自动化搜索最优参数和最优模型", "_____no_output_____" ], [ "### 问题 8- 交叉验证\n*什么是K折交叉验证法(k-fold cross-validation)?优化模型时,使用这种方法对网格搜索有什么好处?网格搜索是如何结合交叉验证来完成对最佳参数组合的选择的?* \n\n**提示:** 跟为何需要一组测试集的原因差不多,网格搜索时如果不使用交叉验证会有什么问题?GridSearchCV中的[`'cv_results'`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html)属性能告诉我们什么?", "_____no_output_____" ], [ "**答案: ** K折交叉验证法是将数据集等分成若干份,其中一份作为验证集而剩下的最为训练集,循环使用不同的验证集和训练集,最后对所有结果取平均,交叉验证能更好的训练模型,因为差不多使用了所有的数据集,通过使用交叉验证法,网格搜索能够更准确的搜索出最优的参数组合。", "_____no_output_____" ], [ "### 练习:训练模型\n在最后一个练习中,你将需要将所学到的内容整合,使用**决策树演算法**训练一个模型。为了保证你得出的是一个最优模型,你需要使用网格搜索法训练模型,以找到最佳的 `'max_depth'` 参数。你可以把`'max_depth'` 参数理解为决策树算法在做出预测前,允许其对数据提出问题的数量。决策树是**监督学习算法**中的一种。\n\n此外,你会发现你的实现使用的是 `ShuffleSplit()` 。它也是交叉验证的一种方式(见变量 `'cv_sets'`)。虽然这不是**问题8**中描述的 K-Fold 交叉验证,这个教程验证方法也很有用!这里 `ShuffleSplit()` 会创造10个(`'n_splits'`)混洗过的集合,每个集合中20%(`'test_size'`)的数据会被用作**验证集**。当你在实现的时候,想一想这跟 K-Fold 交叉验证有哪些相同点,哪些不同点?\n\n在下方 `fit_model` 函数中,你需要做的是:\n- 使用 `sklearn.tree` 中的 [`DecisionTreeRegressor`](http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html) 创建一个决策树的回归函数;\n - 将这个回归函数储存到 `'regressor'` 变量中;\n- 为 `'max_depth'` 创造一个字典,它的值是从1至10的数组,并储存到 `'params'` 变量中;\n- 使用 `sklearn.metrics` 中的 [`make_scorer`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html) 创建一个评分函数;\n - 将 `performance_metric` 作为参数传至这个函数中;\n - 将评分函数储存到 `'scoring_fnc'` 变量中;\n- 使用 `sklearn.model_selection` 中的 
[`GridSearchCV`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html) 创建一个网格搜索对象;\n - 将变量`'regressor'`, `'params'`, `'scoring_fnc'`, 和 `'cv_sets'` 作为参数传至这个对象中;\n - 将 `GridSearchCV` 存到 `'grid'` 变量中。\n \n如果有同学对python函数如何传递多个参数不熟悉,可以参考这个MIT课程的[视频](http://cn-static.udacity.com/mlnd/videos/MIT600XXT114-V004200_DTH.mp4)。", "_____no_output_____" ] ], [ [ "# TODO: Import 'make_scorer', 'DecisionTreeRegressor', and 'GridSearchCV'\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.metrics import make_scorer\nfrom sklearn.model_selection import GridSearchCV\n\ndef fit_model(X, y):\n \"\"\" Performs grid search over the 'max_depth' parameter for a \n decision tree regressor trained on the input data [X, y]. \"\"\"\n \n # Create cross-validation sets from the training data\n cv_sets = ShuffleSplit(n_splits = 10, test_size = 0.20, random_state = 0)\n\n # TODO: Create a decision tree regressor object\n regressor = DecisionTreeRegressor()\n\n # TODO: Create a dictionary for the parameter 'max_depth' with a range from 1 to 10\n params = {'max_depth':[1,2,3,4,5,6,7,8,9,10]}\n\n # TODO: Transform 'performance_metric' into a scoring function using 'make_scorer' \n scoring_fnc = make_scorer(performance_metric)\n\n # TODO: Create the grid search object\n grid = GridSearchCV(regressor,params,scoring=scoring_fnc,cv=cv_sets)\n\n # Fit the grid search object to the data to compute the optimal model\n grid = grid.fit(X, y)\n\n # Return the optimal model after fitting the data\n return grid.best_estimator_", "_____no_output_____" ] ], [ [ "### 做出预测\n当我们用数据训练出一个模型,它现在就可用于对新的数据进行预测。在决策树回归函数中,模型已经学会对新输入的数据*提问*,并返回对**目标变量**的预测值。你可以用这个预测来获取数据未知目标变量的信息,这些数据必须是不包含在训练数据之内的。", "_____no_output_____" ], [ "### 问题 9- 最优模型\n*最优模型的最大深度(maximum depth)是多少?此答案与你在**问题 6**所做的猜测是否相同?*\n\n运行下方区域内的代码,将决策树回归函数代入训练数据的集合,以得到最优化的模型。", "_____no_output_____" ] ], [ [ "# Fit the training data to the model using grid search\nreg = fit_model(X_train, y_train)\n\n# Produce the value for 'max_depth'\nprint \"Parameter 'max_depth' is {} for the optimal model.\".format(reg.get_params()['max_depth'])", "Parameter 'max_depth' is 4 for the optimal model.\n" ] ], [ [ "**Answer: ** 最好的模型的深度是4", "_____no_output_____" ], [ "### 问题 10 - 预测销售价格\n想像你是一个在波士顿地区的房屋经纪人,并期待使用此模型以帮助你的客户评估他们想出售的房屋。你已经从你的三个客户收集到以下的资讯:\n\n| 特征 | 客戶 1 | 客戶 2 | 客戶 3 |\n| :---: | :---: | :---: | :---: |\n| 房屋内房间总数 | 5 间房间 | 4 间房间 | 8 间房间 |\n| 社区贫困指数(%被认为是贫困阶层) | 17% | 32% | 3% |\n| 邻近学校的学生-老师比例 | 15:1 | 22:1 | 12:1 |\n\n*你会建议每位客户的房屋销售的价格为多少?从房屋特征的数值判断,这样的价格合理吗?* \n\n**提示:**用你在**分析数据**部分计算出来的统计信息来帮助你证明你的答案。\n\n运行下列的代码区域,使用你优化的模型来为每位客户的房屋价值做出预测。", "_____no_output_____" ] ], [ [ "# Produce a matrix for client data\nclient_data = [[5, 17, 15], # Client 1\n [4, 32, 22], # Client 2\n [8, 3, 12]] # Client 3\n\n# Show predictions\nfor i, price in enumerate(reg.predict(client_data)):\n print \"Predicted selling price for Client {}'s home: ${:,.2f}\".format(i+1, price)", "Predicted selling price for Client 1's home: $403,025.00\nPredicted selling price for Client 2's home: $237,478.72\nPredicted selling price for Client 3's home: $931,636.36\n" ] ], [ [ "**答案: **\n- Client 1's home: \\$403,025.00\n- Client 2's home: \\$237,478.72 \n- Client 3's home: \\$931,636.36 \n从上面的预测结果来看,这样的价格还算合理,房间多的价格教贵,学生老师比小的,社区贫困指数也较低,说明和预期分析的差不多。而且整体符合之前的统计量,都在最大值和最小值之间,", "_____no_output_____" ], [ "### 敏感度\n\n一个最优的模型不一定是一个健壮模型。有的时候模型会过于复杂或者过于简单,以致于难以泛化新增添的数据;有的时候模型采用的学习算法并不适用于特定的数据结构;有的时候样本本身可能有太多噪点或样本过少,使得模型无法准确地预测目标变量。这些情况下我们会说模型是欠拟合的。执行下方区域中的代码,采用不同的训练和测试集执行 
`fit_model` 函数10次。注意观察对一个特定的客户来说,预测是如何随训练数据的变化而变化的。", "_____no_output_____" ] ], [ [ "vs.PredictTrials(features, prices, fit_model, client_data)", "Trial 1: $391,183.33\nTrial 2: $419,700.00\nTrial 3: $415,800.00\nTrial 4: $420,622.22\nTrial 5: $418,377.27\nTrial 6: $411,931.58\nTrial 7: $399,663.16\nTrial 8: $407,232.00\nTrial 9: $351,577.61\nTrial 10: $413,700.00\n\nRange in prices: $69,044.61\n" ] ], [ [ "### 问题 11 - 实用性探讨\n*简单地讨论一下你建构的模型能否在现实世界中使用?* \n\n**提示:** 回答几个问题,并给出相应结论的理由:\n- *1978年所采集的数据,在今天是否仍然适用?*\n- *数据中呈现的特征是否足够描述一个房屋?*\n- *模型是否足够健壮来保证预测的一致性?*\n- *在波士顿这样的大都市采集的数据,能否应用在其它乡镇地区?*", "_____no_output_____" ], [ "**答案: **\n- 1978年所采集的数据,现在应该不使用了,因为随着时代的发展有些特征已经不再是影响房价的主要因素,而可能又出现新的特征影响房价。\n- 数据中呈现的特征我认为不足以描述一个房屋,因为除了这些因素以外,可能还存在着别的,比如面积,房屋的面积和房间的数量不是严格成正比的,长宽比,方形的房屋要比细长的好等。\n- 我认为模型已经相当健壮,多次预测同一个房屋,价格相近。\n- 像波士顿这种大都市采集的数据,不能应用到乡镇地区,因为大都市和乡镇有着不一样的特征。 \n最后,我认为该模型还不足以在现实世界中使用,还有一些缺陷。", "_____no_output_____" ], [ "### 可选问题 - 预测北京房价\n\n(本题结果不影响项目是否通过)通过上面的实践,相信你对机器学习的一些常用概念有了很好的领悟和掌握。但利用70年代的波士顿房价数据进行建模的确对我们来说意义不是太大。现在你可以把你上面所学应用到北京房价数据集中`bj_housing.csv`。\n\n免责声明:考虑到北京房价受到宏观经济、政策调整等众多因素的直接影响,预测结果仅供参考。\n\n这个数据集的特征有:\n- Area:房屋面积,平方米\n- Room:房间数,间\n- Living: 厅数,间\n- School: 是否为学区房,0或1\n- Year: 房屋建造时间,年\n- Floor: 房屋所处楼层,层\n\n目标变量:\n- Value: 房屋人民币售价,万\n\n你可以参考上面学到的内容,拿这个数据集来练习数据分割与重排、定义衡量标准、训练模型、评价模型表现、使用网格搜索配合交叉验证对参数进行调优并选出最佳参数,比较两者的差别,最终得出最佳模型对验证集的预测分数。", "_____no_output_____" ] ], [ [ "### 你的代码\nbj_data = pd.read_csv('bj_housing.csv')\nbj_prices = bj_data['Value']\nbj_features = bj_data.drop('Value', axis = 1)\nprint \"Boston housing dataset has {} data points with {} variables each.\".format(*bj_data.shape)\n\n\n\nX_train_bj, X_test_bj, y_train_bj, y_test_bj = train_test_split(bj_features, bj_prices, test_size=0.20, random_state=23)\n\nvs.ModelComplexity(X_train_bj, y_train_bj)\n\ndef fit_model_bj(X, y):\n \"\"\" Performs grid search over the 'max_depth' parameter for a \n decision tree regressor trained on the input data [X, y]. 
\"\"\"\n \n # Create cross-validation sets from the training data\n cv_sets = ShuffleSplit(n_splits = 10, test_size = 0.20, random_state = 0)\n\n # TODO: Create a decision tree regressor object\n regressor = DecisionTreeRegressor()\n\n # TODO: Create a dictionary for the parameter 'max_depth' with a range from 1 to 10\n params = {'max_depth':[1,2,3,4,5,6,7,8,9,10]}\n\n # TODO: Transform 'performance_metric' into a scoring function using 'make_scorer' \n scoring_fnc = make_scorer(performance_metric)\n\n # TODO: Create the grid search object\n grid = GridSearchCV(regressor,params,scoring=scoring_fnc,cv=cv_sets)\n\n # Fit the grid search object to the data to compute the optimal model\n grid = grid.fit(X, y)\n\n # Return the optimal model after fitting the data\n return grid.best_estimator_\nreg_bj = fit_model_bj(X_train_bj, y_train_bj)\n\n# Produce the value for 'max_depth'\nprint \"Parameter 'max_depth' is {} for the optimal model.\".format(reg_bj.get_params()['max_depth'])\n\ny_pre_bj = reg_bj.predict(X_test_bj)\ny_pre = reg.predict(X_test)\n\nprint performance_metric(y_test_bj,y_pre_bj)\nprint performance_metric(y_test,y_pre)", "Boston housing dataset has 9999 data points with 7 variables each.\n" ] ], [ [ "你成功的用新的数据集构建了模型了吗?他能对测试数据进行验证吗?它的表现是否符合你的预期?交叉验证是否有助于提升你模型的表现?", "_____no_output_____" ], [ "**答案:** 成功用新的数据集构建了模型,能对测试数据进行验证,表现一般,交叉验证有助于提升模型表现。", "_____no_output_____" ], [ "如果你是从零开始构建机器学习的代码会让你一时觉得无从下手。这时不要着急,你要做的只是查看之前写的代码,把每一行都看明白,然后逐步构建你的模型。当中遇到什么问题也可以在我们论坛寻找答案。也许你会发现你所构建的模型的表现并没有达到你的预期,这说明机器学习并非是一项简单的任务,构建一个表现良好的模型需要长时间的研究和测试。这也是我们接下来的课程中会逐渐学到的。", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
c52cc4098b8c832aee1ce0f6563096fb6cec0ec2
70,273
ipynb
Jupyter Notebook
thunder_svm.ipynb
seymayucer/FacialPhenotypes
043f3ecf956cad53095d93f19383c4c94e033692
[ "MIT" ]
2
2021-03-02T22:25:32.000Z
2021-03-06T23:53:13.000Z
thunder_svm.ipynb
seymayucer/FacialPhenotypes
043f3ecf956cad53095d93f19383c4c94e033692
[ "MIT" ]
null
null
null
thunder_svm.ipynb
seymayucer/FacialPhenotypes
043f3ecf956cad53095d93f19383c4c94e033692
[ "MIT" ]
1
2021-03-22T02:05:32.000Z
2021-03-22T02:05:32.000Z
48.099247
3,953
0.51105
[ [ [ "import glob\nimport random\nimport sys\nfrom itertools import chain\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn import preprocessing\nfrom sklearn.metrics import accuracy_score, confusion_matrix\n\nfrom thundersvm import SVC\nfrom tqdm import tqdm\n\nnp.random.seed(0)\nrandom.seed(0)", "_____no_output_____" ] ], [ [ "### TEST TRAIN INDEX GENERATION FOR FOLDS", "_____no_output_____" ] ], [ [ "attribute_map = [\n # {\"Race\": [\"African\", \"Asian\", \"Indian\", \"Caucasian\"]},\n {\"skintype\": [\"type1\", \"type2\", \"type3\", \"type4\", \"type5\", \"type6\"]},\n {\"eye\": [\"normal\", \"narrow\"]},\n {\"haircolor\": [\"red\", \"black\", \"gray\", \"brown\", \"blonde\"]},\n {\"hairtype\": [\"straight\", \"wavy\", \"bald\", \"curly\"]},\n {\"lips\": [\"small\", \"big\"]},\n {\"nose\": [\"wide\", \"narrow\"]},\n]", "_____no_output_____" ], [ "vgg = pd.read_csv(\"/mnt/HDD/FaceDatasetCenter/metadata/VGGFace2_metadata_FDA.csv\")\nvgg_test = vgg[vgg.type == \"test\"]\nvgg_test.head()\n\nvgg_image = pd.read_csv(\n \"/mnt/HDD/FaceDatasetCenter/metadata/VGGFace2_image_meta_test.csv\"\n)\nvgg_image_test = vgg_image[vgg_image.type == \"test\"]\nvgg_image_test = vgg_image_test.sort_values(by=\"file\")\nvgg_image_test.head()", "_____no_output_____" ], [ "NUM_FOLDS = 3\nTEST_SAMPLE_SIZE = 50\nfolds_folder = Path(\"folds\")\nfolds_folder.mkdir(exist_ok=True)\n", "_____no_output_____" ], [ "def generate_fold():\n \n all_folds = []\n for fold in range(0, NUM_FOLDS):\n print(TEST_SAMPLE_SIZE * NUM_FOLDS)\n print(f\"Fold {fold+1}\")\n class_folds = {\"train\": [], \"test\": []}\n for i, group in vgg_image_test.groupby(\"Class_ID\"):\n num_samples = group.shape[0]\n test_mask = np.zeros(num_samples, dtype=np.bool)\n if TEST_SAMPLE_SIZE * NUM_FOLDS > num_samples:\n start = fold * TEST_SAMPLE_SIZE\n end = start + TEST_SAMPLE_SIZE\n ix = [i % num_samples for i in range(start, end)]\n # print(f\"ClassID: {i}, fold: {fold} - [{ix[0]}:{ix[-1]}]\")\n else:\n class_fold_size = num_samples // NUM_FOLDS\n start = fold * class_fold_size\n end = start + class_fold_size\n ix = range(start, end)\n \n test_mask[ix] = True\n try:\n class_folds[\"test\"].append(\n group[test_mask].sample(n=TEST_SAMPLE_SIZE, random_state=0)\n )\n except:\n import pdb\n\n pdb.set_trace()\n class_folds[\"train\"].append(group[~test_mask])\n all_folds.append(class_folds)\n return all_folds", "_____no_output_____" ], [ "all_folds = generate_fold()\nprint(len(all_folds))\nfor i, fold in enumerate(all_folds):\n train = pd.concat(fold[\"train\"])\n test = pd.concat(fold[\"test\"])\n \n train.to_parquet(folds_folder / f\"fold_{i}_train.pq\", compression=\"GZIP\")\n test.to_parquet(folds_folder / f\"fold_{i}_test.pq\", compression=\"GZIP\")", "150\nFold 1\n150\nFold 2\n150\nFold 3\n3\n" ] ], [ [ "### Feature loading", "_____no_output_____" ] ], [ [ "features = np.load(\"features/vggface2_test_features.npy\",allow_pickle=True)\npath_arr = np.load(\"features/vggface2_test_paths.npy\",allow_pickle=True)", "_____no_output_____" ], [ "meta = pd.DataFrame(path_arr, columns=[\"full_path\"])\nmeta[\"file\"] = meta.full_path.apply(lambda x: \"/\".join(Path(x).parts[-2:]))\nlabels = list(map(lambda x: str(x).split(\"/\")[-2], path_arr))\nle = preprocessing.LabelEncoder()\nlabels = le.fit_transform(labels)\nmeta[\"y_test\"] = labels\nmeta = meta.merge(vgg_image_test,how='left',on='file')\nmeta.head()", "_____no_output_____" ], [ "#Train CODE\ndef train(X,y): \n all_predictions = []\n for i,fold in enumerate(range(0, 
NUM_FOLDS)):\n train_ixs = pd.read_parquet(folds_folder / f\"fold_{i}_100_train.pq\")\n test_ixs = pd.read_parquet(folds_folder / f\"fold_{i}_100_test.pq\")\n print(folds_folder / f\"fold_{i}_train.pq\")\n print(test_ixs.shape)\n print(meta[meta.file.isin(test_ixs.file)].index.shape)\n test_index = meta[meta.file.isin(test_ixs.file)].index\n train_index = meta[meta.file.isin(train_ixs.file)].index\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n\n print(X_test.shape,y_test.shape)\n print('SVM Training...')\n svm_model_linear = SVC(kernel=\"linear\", C=1).fit(X_train, y_train)\n print(\"Starting prediction (The computer can be unresponsive during the prediction).\")\n\n TEST_BATCH_SIZE = 1000\n preds = []\n ys=[]\n with tqdm(total=X_test.shape[0], file=sys.stdout) as pbar:\n for i in range(0, X_test.shape[0], TEST_BATCH_SIZE):\n X_test_batch = X_test[i : i + TEST_BATCH_SIZE]\n pred = svm_model_linear.predict(X_test_batch)\n preds.append(pred)\n # update tqdm\n pbar.set_description(\"Processed: %d\" % (1 + i))\n pbar.update(TEST_BATCH_SIZE)\n all_predictions.append(preds)c\n return all_predictions\n ", "_____no_output_____" ], [ "X = np.asarray(features, dtype=np.float32)\ny = labels\npred_list = train(X,y)", "folds/fold_0_train.pq\n file Class_ID Sample_Num Gender Race skintype type \\\n45 n000001/0046_01.jpg n000001 424 m Asian type4 test \n60 n000001/0061_01.jpg n000001 424 m Asian type4 test \n7 n000001/0008_01.jpg n000001 424 m Asian type4 test \n51 n000001/0052_01.jpg n000001 424 m Asian type4 test \n66 n000001/0067_01.jpg n000001 424 m Asian type4 test \n\n lips nose hairtype haircolor GenderM eye \n45 small wide bald gray m narrow \n60 small wide bald gray m narrow \n7 small wide bald gray m narrow \n51 small wide bald gray m narrow \n66 small wide bald gray m narrow \n(25000,)\n(25000, 512) (25000,)\nSVM Training...\nStarting prediction (The computer can be unresponsive during the prediction).\nProcessed: 24001: 100%|██████████| 25000/25000 [09:26<00:00, 44.09it/s]\nfolds/fold_1_train.pq\n file Class_ID Sample_Num Gender Race skintype type \\\n186 n000001/0181_01.jpg n000001 424 m Asian type4 test \n201 n000001/0196_01.jpg n000001 424 m Asian type4 test \n148 n000001/0140_02.jpg n000001 424 m Asian type4 test \n192 n000001/0187_01.jpg n000001 424 m Asian type4 test \n207 n000001/0202_01.jpg n000001 424 m Asian type4 test \n\n lips nose hairtype haircolor GenderM eye \n186 small wide bald gray m narrow \n201 small wide bald gray m narrow \n148 small wide bald gray m narrow \n192 small wide bald gray m narrow \n207 small wide bald gray m narrow \n(25000,)\n(25000, 512) (25000,)\nSVM Training...\nStarting prediction (The computer can be unresponsive during the prediction).\nProcessed: 24001: 100%|██████████| 25000/25000 [08:16<00:00, 50.35it/s]\nfolds/fold_2_train.pq\n file Class_ID Sample_Num Gender Race skintype type \\\n327 n000001/0329_01.jpg n000001 424 m Asian type4 test \n342 n000001/0350_01.jpg n000001 424 m Asian type4 test \n289 n000001/0287_01.jpg n000001 424 m Asian type4 test \n333 n000001/0337_01.jpg n000001 424 m Asian type4 test \n348 n000001/0360_01.jpg n000001 424 m Asian type4 test \n\n lips nose hairtype haircolor GenderM eye \n327 small wide bald gray m narrow \n342 small wide bald gray m narrow \n289 small wide bald gray m narrow \n333 small wide bald gray m narrow \n348 small wide bald gray m narrow \n(25000,)\n(25000, 512) (25000,)\nSVM Training...\nStarting prediction (The computer can be unresponsive during 
the prediction).\nProcessed: 24001: 100%|██████████| 25000/25000 [09:02<00:00, 46.11it/s]\n" ], [ "ap = np.asarray(pred_list)\nap = ap.reshape(ap.shape[0],ap.shape[1]*ap.shape[2])\nap.shape", "_____no_output_____" ], [ "# TEST CODE\nresult_dic = []\nfor i, fold in enumerate(range(0, 3)):\n train_ixs = pd.read_parquet(folds_folder / f\"fold_{i}_train.pq\")\n test_ixs = pd.read_parquet(folds_folder / f\"fold_{i}_test.pq\")\n\n print(meta.shape,test_ixs.index)\n meta_test = meta[meta.file.isin(test_ixs.file)]\n print(meta_test.shape)\n test_index = meta_test.index\n y_test = y[test_index]\n \n meta_test[\"y_pred\"] = ap[i]\n print(\"Overall Accuracy:\", accuracy_score(meta_test[\"y_test\"], meta_test[\"y_pred\"]))\n \n print(\"Group initilization!\")\n for attr in attribute_map:\n # for one value of one attribute\n for key, value in attr.items():\n for val in value:\n subgroup = meta_test[meta_test[key] == val]\n \n score= accuracy_score(subgroup[\"y_test\"], subgroup[\"y_pred\"])\n print(\n key,\n val,\n \":\",\n score,\n subgroup.shape,\n )\n \n \n result_dic.append([key,val,score,subgroup.shape[0]])", "(169396, 15) Int64Index([ 45, 60, 7, 51, 66, 27, 71, 54,\n 123, 8,\n ...\n 169205, 169222, 169163, 169194, 169228, 169176, 169215, 169181,\n 169191, 169186],\n dtype='int64', length=25000)\n(25000, 15)\nOverall Accuracy: 0.98764\nGroup initilization!\nskintype type1 : 0.96625 (800, 16)\nskintype type2 : 0.9903061224489796 (9800, 16)\nskintype type3 : 0.9867289719626168 (10700, 16)\nskintype type4 : 0.9870833333333333 (2400, 16)\nskintype type5 : 0.9888888888888889 (900, 16)\nskintype type6 : 0.99 (400, 16)\neye normal : 0.9878202247191011 (22250, 16)\neye narrow : 0.9861818181818182 (2750, 16)\nhaircolor red : 0.99 (200, 16)\nhaircolor black : 0.9899328859060402 (7450, 16)\nhaircolor gray : 0.993015873015873 (3150, 16)\nhaircolor brown : 0.9846783625730994 (8550, 16)\nhaircolor blonde : 0.9860176991150442 (5650, 16)\nhairtype straight : 0.9875728155339806 (15450, 16)\nhairtype wavy : 0.9869677419354839 (7750, 16)\nhairtype bald : 0.9942857142857143 (700, 16)\nhairtype curly : 0.9890909090909091 (1100, 16)\nlips small : 0.9887603305785124 (18150, 16)\nlips big : 0.9846715328467154 (6850, 16)\nnose wide : 0.9856880733944954 (10900, 16)\nnose narrow : 0.9891489361702127 (14100, 16)\n(169396, 15) Int64Index([ 186, 201, 148, 192, 207, 168, 212, 195,\n 264, 149,\n ...\n 169283, 169300, 169241, 169272, 169306, 169254, 169293, 169259,\n 169269, 169264],\n dtype='int64', length=25000)\n(25000, 15)\nOverall Accuracy: 0.97452\nGroup initilization!\nskintype type1 : 0.9575 (800, 16)\nskintype type2 : 0.9739795918367347 (9800, 16)\nskintype type3 : 0.9754205607476636 (10700, 16)\nskintype type4 : 0.9770833333333333 (2400, 16)\nskintype type5 : 0.9855555555555555 (900, 16)\nskintype type6 : 0.9575 (400, 16)\neye normal : 0.9756404494382023 (22250, 16)\neye narrow : 0.9654545454545455 (2750, 16)\nhaircolor red : 0.985 (200, 16)\nhaircolor black : 0.9754362416107383 (7450, 16)\nhaircolor gray : 0.9901587301587301 (3150, 16)\nhaircolor brown : 0.972280701754386 (8550, 16)\nhaircolor blonde : 0.9676106194690266 (5650, 16)\nhairtype straight : 0.9766990291262136 (15450, 16)\nhairtype wavy : 0.9690322580645161 (7750, 16)\nhairtype bald : 0.99 (700, 16)\nhairtype curly : 0.9727272727272728 (1100, 16)\nlips small : 0.9775757575757575 (18150, 16)\nlips big : 0.9664233576642336 (6850, 16)\nnose wide : 0.9744954128440367 (10900, 16)\nnose narrow : 0.9745390070921985 (14100, 16)\n(169396, 15) Int64Index([ 327, 342, 
289, 333, 348, 309, 353, 336,\n 405, 290,\n ...\n 169361, 169378, 169319, 169350, 169384, 169332, 169371, 169337,\n 169347, 169342],\n dtype='int64', length=25000)\n(25000, 15)\nOverall Accuracy: 0.90724\nGroup initilization!\nskintype type1 : 0.86125 (800, 16)\nskintype type2 : 0.9057142857142857 (9800, 16)\nskintype type3 : 0.9071028037383178 (10700, 16)\nskintype type4 : 0.92375 (2400, 16)\nskintype type5 : 0.9111111111111111 (900, 16)\nskintype type6 : 0.9325 (400, 16)\neye normal : 0.9074157303370787 (22250, 16)\neye narrow : 0.9058181818181819 (2750, 16)\nhaircolor red : 0.91 (200, 16)\nhaircolor black : 0.9139597315436242 (7450, 16)\nhaircolor gray : 0.9206349206349206 (3150, 16)\nhaircolor brown : 0.9042105263157895 (8550, 16)\nhaircolor blonde : 0.8953982300884956 (5650, 16)\nhairtype straight : 0.9121682847896441 (15450, 16)\nhairtype wavy : 0.8981935483870968 (7750, 16)\nhairtype bald : 0.9285714285714286 (700, 16)\nhairtype curly : 0.8881818181818182 (1100, 16)\nlips small : 0.9126721763085399 (18150, 16)\nlips big : 0.8928467153284672 (6850, 16)\nnose wide : 0.9067889908256881 (10900, 16)\nnose narrow : 0.9075886524822695 (14100, 16)\n" ], [ "subgroup.shape", "_____no_output_____" ], [ "results = pd.DataFrame(result_dic,columns=['feature','category','acc','size'])\nresults[\"attribute_name\"] = results[\"feature\"] +'_'+ results[\"category\"]\nresults = results.groupby('attribute_name').mean().sort_values(by='attribute_name')\n\nresults = results.reset_index()\nresults['attribute'] = results.attribute_name.apply(lambda x:x.split('_')[0])\n\ntotal_size = results.groupby('attribute').sum()['size'][0]\nprint('total size',total_size)\nresults['Ratio']=results['size'].apply(lambda x: x/total_size)\nresults", "total size 25000\n" ], [ "results.to_csv('face_identifiacation_attribute_based_results.csv',index=False)", "_____no_output_____" ], [ "print(\"std:\", np.std(results.acc.values,ddof=1) * 100)\nprint(\"bias:\", (1-results.acc.values.min()) / (1-results.acc.values.max()))\n", "std: 0.8587194162970204\nbias: 2.4672131147540934\n" ], [ "(1.0-results.acc.values.min())/(1-results.acc.values.max())", "_____no_output_____" ], [ "1-results.acc.values.max()", "_____no_output_____" ], [ "results", "_____no_output_____" ], [ "(1.0-results.acc.values.min()),(1-results.acc.values.max())", "_____no_output_____" ], [ "# print(results[['feature_name','acc']].to_latex())\nresults[\"Ratio\"] = results[\"Ratio\"].apply(lambda x: f\"{100*x:.2f}\")\nresults[\"Acc\"] = results[\"acc\"].apply(lambda x: f\"{100*x:.2f}\")\n\nresults[\"attribute_name\"] = results[\"attribute_name\"].str.replace(\"skintype\", \"\")\nresults[\"attribute_name\"] = results[\"attribute_name\"].str.replace(\"haircolor\", \"hair \")\nresults[\"attribute_name\"] = results[\"attribute_name\"].str.replace(\"hairtype\", \"hair \")\nresults[\"attribute_name\"] = results[\"attribute_name\"].str.title()\nresults[\"attribute_name\"] = results[\"attribute_name\"].apply(\n lambda x: \" \".join(x.split(\"_\")[::-1])\n)\nresults = results.sort_values(by='Acc')\nattribute_res = results[[\"attribute_name\",\"Ratio\", \"Acc\"]]\n\nattribute_res = pd.concat(\n [\n attribute_res.iloc[:11].reset_index(drop=True),\n attribute_res.iloc[11:].reset_index(drop=True),\n \n \n ],\n axis=1,\n ignore_index=True,\n)\nattribute_res.columns = [\"Attribute Category\", \"Ratio (%)\",\"Accuracy (%)\", \"Attribute Category(%)\", \"Ratio\",\"Accuracy(%)\"]\nattribute_res", "_____no_output_____" ], [ "print(\n attribute_res.to_latex(\n index=False, 
caption=\"Table Caption\", label=\"tab:fi1\", na_rep=\"\"\n )\n)", "\\begin{table}\n\\centering\n\\caption{Table Caption}\n\\label{tab:fi1}\n\\begin{tabular}{llllll}\n\\toprule\nAttribute Category & Accuracy (\\%) & Ratio (\\%) & Attribute Category(\\%) & Ratio & Accuracy(\\%) \\\\\n\\midrule\n Type1 & 3.2\\% & 92.8\\% & Type2 & 39.2\\% & 95.7\\% \\\\\n Big Lips & 27.4\\% & 94.8\\% & Straight Hair & 61.8\\% & 95.9\\% \\\\\n Blonde Hair & 22.6\\% & 95.0\\% & Black Hair & 29.8\\% & 96.0\\% \\\\\n Curly Hair & 4.4\\% & 95.0\\% & Small Lips & 72.6\\% & 96.0\\% \\\\\n Wavy Hair & 31.0\\% & 95.1\\% & Type6 & 1.6\\% & 96.0\\% \\\\\n Narrow Eye & 11.0\\% & 95.2\\% & Red Hair & 0.8\\% & 96.2\\% \\\\\n Brown Hair & 34.2\\% & 95.4\\% & Type5 & 3.6\\% & 96.2\\% \\\\\n Wide Nose & 43.6\\% & 95.6\\% & Type4 & 9.6\\% & 96.3\\% \\\\\n Type3 & 42.8\\% & 95.6\\% & Gray Hair & 12.6\\% & 96.8\\% \\\\\n Narrow Nose & 56.4\\% & 95.7\\% & Bald Hair & 2.8\\% & 97.1\\% \\\\\n Normal Eye & 89.0\\% & 95.7\\% & & & \\\\\n\\bottomrule\n\\end{tabular}\n\\end{table}\n\n" ], [ "print(type(all_predictions), type(y_s))\nfor y_test, y_pred in zip(y_s, all_predictions):\n print(type(y_pred), type(y_test))\n y_pred = np.array(list(chain(*y_pred)))\n print(\"Overall Accuracy:\", accuracy_score(y_test, y_pred))", "<class 'list'> <class 'list'>\n<class 'list'> <class 'numpy.ndarray'>\nOverall Accuracy: 0.98984\n<class 'list'> <class 'numpy.ndarray'>\nOverall Accuracy: 0.98184\n<class 'list'> <class 'numpy.ndarray'>\nOverall Accuracy: 0.96732\n<class 'list'> <class 'numpy.ndarray'>\nOverall Accuracy: 0.94552\n<class 'list'> <class 'numpy.ndarray'>\nOverall Accuracy: 0.93396\n" ], [ "feature_arr = np.asarray(features, dtype=np.float32)\nprint(feature_arr[0][0], np.mean(feature_arr), np.std(feature_arr))\nfeature_arr = preprocessing.normalize(feature_arr)\nprint(feature_arr[0][0], np.mean(feature_arr), np.std(feature_arr))\nlabels = list(map(lambda x: x.split(\"/\")[-2], path_arr))\nle = preprocessing.LabelEncoder()\nlabels = le.fit_transform(labels)\nX = feature_arr\ny = labels\n\n# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)\n\nprint(\"Data is ready!\", X.shape, X.shape)", "0.88497335 -0.0055923657 0.8592094\n0.045853145 -0.00028270765 0.044193346\nData is ready! 
(169396, 512) (169396, 512)\n" ], [ "sss = StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=0)\nfor train_index, test_index in sss.split(X, y):\n print(\"TRAIN:\", type(train_index), \"TEST:\", type(test_index))\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n svm_model_linear = SVC(kernel=\"linear\", C=20).fit(X_train, y_train)\n print(\"Training is completed.\")", "TRAIN: <class 'numpy.ndarray'> TEST: <class 'numpy.ndarray'>\nTraining is completed.\n" ], [ "# import datetime\n# timestr = datetime.datetime.now().isoformat().replace(\":\", \".\")\n# svm_model_file = f\"svm_model_{timestr}\"\n# svm_model_linear.save_to_file(svm_model_file)\n# print(f\"Saved model to: {svm_model_file}\")", "_____no_output_____" ], [ "import sys\nfrom itertools import chain\n\nfrom tqdm import tqdm\n\nprint(\"Starting prediction (The computer can be unresponsive during the prediction).\")\n\nTEST_BATCH_SIZE = 1000\npreds = []\nwith tqdm(total=X_test.shape[0], file=sys.stdout) as pbar:\n for i in range(0, X_test.shape[0], TEST_BATCH_SIZE):\n X_test_batch = X_test[i : i + TEST_BATCH_SIZE]\n pred = svm_model_linear.predict(X_test_batch)\n preds.append(pred)\n # update tqdm\n pbar.set_description(\"Processed: %d\" % (1 + i))\n pbar.update(TEST_BATCH_SIZE)", "Starting prediction (The computer can be unresponsive during the prediction).\nProcessed: 24001: 100%|██████████| 25000/25000 [07:52<00:00, 52.89it/s]\n" ], [ "y_pred = np.array(list(chain(*preds)))\nprint(\"Overall Accuracy:\", accuracy_score(y_test, y_pred))", "Overall Accuracy: 0.99148\n" ], [ "test_ixs[\"y_pred\"] = y_pred.astype(np.int)\ntest_ixs[\"y_test\"] = y_test\ntest_ixs = test_ixs.rename(columns={\"subject\": \"Class_ID\"})\ntest_data = test_ixs.merge(vgg_test, on=\"Class_ID\", how=\"left\")", "_____no_output_____" ], [ "attribute_map = [\n {\"skintype\": [\"type1\", \"type2\", \"type3\", \"type4\", \"type5\", \"type6\"]},\n {\"hairtype\": [\"straight\", \"wavy\", \"bald\", \"curly\"]},\n {\"haircolor\": [\"red\", \"black\", \"grey\", \"brown\", \"blonde\"]},\n {\"lips\": [\"small\", \"big\"]},\n {\"eye\": [\"normal\", \"narrow\"]},\n {\"nose\": [\"wide\", \"narrow\"]},\n]\nprint(\"Group initilization!\")\nfor attr in attribute_map:\n # for one value of one attribute\n for key, value in attr.items():\n for val in value:\n subgroup = test_data[test_data[key] == val]\n print(\n key,\n val,\n \":\",\n accuracy_score(subgroup[\"y_test\"], subgroup[\"y_pred\"]),\n subgroup.shape,\n )", "Group initilization!\nskintype type1 : 0.99875 (800, 19)\nskintype type2 : 0.9933673469387755 (9800, 19)\nskintype type3 : 0.9909345794392523 (10700, 19)\nskintype type4 : 0.9816666666666667 (2400, 19)\nskintype type5 : 0.9966666666666667 (900, 19)\nskintype type6 : 0.9925 (400, 19)\nhairtype straight : 0.9905501618122977 (15450, 19)\nhairtype wavy : 0.9930322580645161 (7750, 19)\nhairtype bald : 0.9914285714285714 (700, 19)\nhairtype curly : 0.9936363636363637 (1100, 19)\nhaircolor red : 0.995 (200, 19)\nhaircolor black : 0.9914093959731544 (7450, 19)\nhaircolor grey : 0.9873015873015873 (3150, 19)\nhaircolor brown : 0.991812865497076 (8550, 19)\nhaircolor blonde : 0.9932743362831858 (5650, 19)\nlips small : 0.9921763085399449 (18150, 19)\nlips big : 0.9896350364963503 (6850, 19)\neye normal : 0.9918651685393258 (22250, 19)\neye narrow : 0.9883636363636363 (2750, 19)\nnose wide : 0.9935779816513761 (10900, 19)\nnose narrow : 0.9898581560283688 (14100, 19)\n" ], [ "# y_pred = 
svm_model_linear.predict(X_test)", "_____no_output_____" ], [ "# features = np.load('features/unlearn_races_r50_feat_05ep.npz')\n# meta_data = pd.read_csv('metadata/VGGFace2_200_Subjects_Test_Images.csv')", "_____no_output_____" ], [ "# features = np.load('../FeatureEncodingsRFW/senet50_ft_features.npy')\n# train_ixs = pd.read_csv('../train_test_split/rfwtest_train_indexes.csv')\n# test_ixs = pd.read_csv('../train_test_split/rfwtest_test_indexes.csv')", "_____no_output_____" ], [ "features = features[\"arr_0\"]", "_____no_output_____" ], [ "feature_arr = np.asarray(features[:][:, :-1], dtype=np.float64)\npath_arr = features[:][:, -1]\n\nlabels = list(map(lambda x: x.split(\"/\")[0], path_arr))\nle = preprocessing.LabelEncoder()\nlabels = le.fit_transform(labels)\nX = pd.DataFrame(feature_arr)\ny = pd.Series(labels)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)\nprint(\"SVM!\")\n\nsvm_model_linear = SVC(kernel=\"linear\", C=1).fit(X_train, y_train)\ny_pred = svm_model_linear.predict(X_test)\nprint(\"Overall Accuracy:\", accuracy_score(y_test.values, y_pred))\n\nprint(\"Group initilization!\")\ntest_pathes = path_arr[y_test.index.values]\nfor race in [\"African\", \"Asian\", \"Caucasian\", \"Indian\"]:\n for gender in [\"m\", \"f\"]:\n main_group = meta_data[\n (meta_data.race == race)\n & (meta_data.gender == gender)\n & (meta_data.generated_version == \"original\")\n ]\n group_file = main_group.filename.values\n indexes = []\n for el in group_file:\n loc = np.argwhere(test_pathes == el)\n if loc.size != 0:\n indexes.append(int(loc[0][0]))\n if len(indexes) > 0:\n indexes = np.asarray(indexes)\n print(race, gender)\n print(\n \" accuracy:%d %.3f\"\n % (\n len(y_test.values[indexes]),\n accuracy_score(y_test.values[indexes], y_pred[indexes]),\n )\n )", "_____no_output_____" ], [ "from sklearn.model_selection import GridSearchCV\n\nfeature_arr = np.asarray(features[:][:, :-1], dtype=np.float64)\npath_arr = features[:][:, -1]\n\nlabels = list(map(lambda x: x.split(\"/\")[0], path_arr))\nle = preprocessing.LabelEncoder()\nlabels = le.fit_transform(labels)\nX = pd.DataFrame(feature_arr)\ny = pd.Series(labels)\nparam_grid = [\n {\"C\": [1, 10, 100, 1000], \"kernel\": [\"linear\"]},\n {\"C\": [1, 10, 100, 1000], \"gamma\": [0.001, 0.0001], \"kernel\": [\"rbf\"]},\n]\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)\ngrid_search = GridSearchCV(SVC(), param_grid, cv=2)\nsvm_model = grid_search.fit(X_train, y_train)\nprint(grid_search.best_params_)\n\n# y_pred = svm_model.predict(X_test)\n# print('Overall Accuracy:',accuracy_score(y_test.values, y_pred))\n\n# print('Group initilization!')\n# test_pathes = path_arr[y_test.index.values]\n# for race in ['African','Asian','Caucasian', 'Indian']:\n# for gender in ['f','m']:\n# main_group = meta_data[(meta_data.race == race) & (meta_data.gender== gender) & (meta_data.generated_version== 'original') ]\n# group_file = main_group.filename.values\n# indexes = []\n# for el in group_file:\n# loc = np.argwhere(test_pathes==el)\n# if loc.size != 0:\n# indexes.append(int(loc[0][0]))\n# if len(indexes)>0:\n# indexes = np.asarray(indexes)\n# print(race,gender)\n# print(' accuracy:%d %.3f'%(len(y_test.values[indexes]), accuracy_score(y_test.values[indexes], y_pred[indexes])))", "_____no_output_____" ] ], [ [ "SVM!\n\nOverall Accuracy: 0.5139621028636558\n\nGroup initilization!\nAfrican m\n accuracy:1551 0.454\nAfrican f\n accuracy:1953 0.473\nAsian m\n accuracy:1610 0.496\nAsian f\n 
accuracy:1516 0.383\nCaucasian m\n accuracy:1797 0.559\nCaucasian f\n accuracy:1959 0.590\nIndian m\n accuracy:1964 0.615\nIndian f\n accuracy:1688 0.498", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
c52cc56e7c43e5eafb54fd472db6a9d13150a83c
40,402
ipynb
Jupyter Notebook
notebooks/solutions/15-Matlplotlib-Solutions.ipynb
ueapy/pythoncourse2021-materials
1bcb5c067dede20b9fa02ff6451cf564f436d76b
[ "MIT" ]
3
2021-09-30T05:59:49.000Z
2022-02-23T10:07:58.000Z
notebooks/solutions/15-Matlplotlib-Solutions.ipynb
ueapy/pythoncourse2021-materials
1bcb5c067dede20b9fa02ff6451cf564f436d76b
[ "MIT" ]
5
2021-06-07T11:53:09.000Z
2021-06-15T17:15:39.000Z
notebooks/solutions/15-Matlplotlib-Solutions.ipynb
ueapy/pythoncourse2021-materials
1bcb5c067dede20b9fa02ff6451cf564f436d76b
[ "MIT" ]
1
2021-06-11T08:24:45.000Z
2021-06-11T08:24:45.000Z
27.484354
326
0.54381
[ [ [ "# Matplotlib - Intro", "_____no_output_____" ], [ "* **matplotlib** is a Python plotting library for producing publication quality figures\n * allows for interactive, cross-platform control of plots\n * makes it easy to produce static raster or vector graphics\n * gives the developer complete control over the appearance of their plots, while still being usable through a powerful defaults system\n* standard scientific plotting library\n* online documentnation is on [matplotlib.org](https://matplotlib.org/index.html), with lots of examples in the [gallery](https://matplotlib.org/gallery.html)\n\n* behaves similarly to Matlab", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np", "_____no_output_____" ] ], [ [ "To be efficient with **matplotlib**, you first need to understand its termonology.", "_____no_output_____" ], [ "## Parts of a Figure", "_____no_output_____" ], [ "<img src=\"../../figures/matplotlib_figure_parts.png\" style=\"height:90%; width:90%;\">", "_____no_output_____" ], [ "### Figure, Axes, Axis", "_____no_output_____" ], [ "* **Figure** is the whole image, the top-level 'container' that holds all objects of an image.\n* **Axes** is the region of a **Figure** that displays your data. Most plotting occurs here! Very similar to a subplot\n* **Axes** contains **Axis** objects (x axis,y axis) which control the data limits.\n* **Figure** can have any number of **Axes**, but to be useful should have at least one.", "_____no_output_____" ] ], [ [ "fig = plt.figure() # Create a figure\naxes = fig.add_subplot(111) # add one Axes to Figure", "_____no_output_____" ] ], [ [ "Usually an **Axes** is set up with a call to `fig.add_subplot()`, `plt.subplot()`, or `plt.subplots()` \n\nThe most flexible option is `plt.subplots()`", "_____no_output_____" ] ], [ [ "fig,axes = plt.subplots(2,3,figsize=(12,6))\n# This will create a figure and 6 axes arranged in 2 rows, 3 columns", "_____no_output_____" ] ], [ [ "### Line plots", "_____no_output_____" ], [ "Lets draw two cosine functions of different amplitude on the same **Axes**.", "_____no_output_____" ] ], [ [ "# Create data\nX = np.linspace(-np.pi, np.pi, 100, endpoint=True)\nY1 = np.cos(X)\nY2 = 2*np.cos(X)", "_____no_output_____" ], [ "# Plot data\nfig, axes = plt.subplots()\naxes.plot(X, Y1)\naxes.plot(X, Y2);", "_____no_output_____" ] ], [ [ "** Tip: by adding a semicolon at the end of a function, the output is suppressed.", "_____no_output_____" ], [ "### Default and named colors", "_____no_output_____" ], [ "![](../figures/dflt_style_changes-1.png)", "_____no_output_____" ], [ "**Exercise 0 (10 mins)**. The figure before is generated using the default settings. The code below shows these settings explicitly. Play with the values to explore their effect. 
For details on changing properties see [line plots on the matplotlib website](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.plot.html)", "_____no_output_____" ] ], [ [ "# Plot data (with explicit plotting settings)\nfig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6,4))\naxes.plot(X, Y1, color='C0', linewidth=5.0, linestyle='-',alpha=0.5)\naxes.plot(X, Y2, color='r', linewidth=3.0, linestyle='--')", "_____no_output_____" ], [ "# Your code here", "_____no_output_____" ], [ "# Sample solution\n# Plot data (with explicit plotting settings)\nfig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8,4))\naxes.plot(X, Y1, color='r', linewidth=4, linestyle='--')\naxes.plot(X, Y2, color='b', linewidth=2, linestyle='-.')\naxes.set_xlim(-8, 8)\naxes.set_ylim(-3, 3)\naxes.set_xticks(np.linspace(-4,4,9,endpoint=True))\naxes.set_yticks(np.linspace(-3,3,11,endpoint=True));", "_____no_output_____" ] ], [ [ "**Exercise 1 (10 mins)**. Having integer numbers on the x axis here might divert reader's attention from the critical points of the graph.\n\n1. Change **xticks** and **xticklabels** into multiples of $\\pi$. Use `axes.set_xticks()` and `axes.set_xticklabels()`.\n\n\\*\\* Tip: use `np.pi` for **xticks** and '\\$\\pi$' for **xticklabels**. format strings in LaTeX by prepending 'r'ie `axes.set_xticklabels([r'$\\pi$'])`", "_____no_output_____" ] ], [ [ "# Your code here", "_____no_output_____" ], [ "# Solution\n# Change xticks, yticks and xticklabels\nfig, axes = plt.subplots()\naxes.plot(X, Y1);\naxes.plot(X, Y2);\n\naxes.set_xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi]);\naxes.set_yticks([-2, -1, 0, 1, 2]);\naxes.set_xticklabels(['$-\\pi$', '$-\\pi/2$', '$0$', '$+\\pi/2$', '$+\\pi$']);", "_____no_output_____" ] ], [ [ "**Exersise 2 (5 mins)**. Add a legend.\n1. Give both cosine functions a name by adding an extra keyword argument, a label, to `axes.plot()`.\n2. Add a legend object to **Axes**. ", "_____no_output_____" ] ], [ [ "# Your code here", "_____no_output_____" ], [ "# Solution\n# Add a legend\nfig, axes = plt.subplots()\naxes.plot(X, Y1, label='cos(x)');\naxes.plot(X, Y2, label='2cos(x)');\n\naxes.set_xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi]);\naxes.set_yticks([-2, -1, 0, 1, 2]);\naxes.set_xticklabels(['$-\\pi$', '$-\\pi/2$', '$0$', '$+\\pi/2$', '$+\\pi$']);\naxes.legend(loc='upper left', frameon=False);\n", "_____no_output_____" ] ], [ [ "**Exercise 3 (10 mins)**. Annotate an interesting point on a graph, for example, $2\\cos(\\frac{\\pi}{4})$.\n1. Add a single point to the graph by using `axes.plot(..., marker='o')`.\n2. 
Use `axes.annotate(s, xy=..., xytext=...)` to add annotation.\n\n** Tip: visit [annotations](https://matplotlib.org/users/annotations_intro.html).", "_____no_output_____" ] ], [ [ "# Your code here", "_____no_output_____" ], [ "fig, axes = plt.subplots()\naxes.plot(X, Y1, label='cos(x)');\naxes.plot(X, Y2, label='2cos(x)');\n\naxes.set_xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi]);\naxes.set_yticks([-2, -1, 0, 1, 2]);\naxes.set_xticklabels(['$-\\pi$', '$-\\pi/2$', '$0$', '$+\\pi/2$', '$+\\pi$']);\naxes.legend(loc='upper left', frameon=False);\npoint = np.pi/4\naxes.plot(point, 2*np.cos(point), marker='o');\naxes.annotate(r'$2\\cos(\\frac{\\pi}{4})=\\sqrt{2}$', xy=(point, 2*np.cos(point)), xytext=(1, 1.5), fontsize=16);", "_____no_output_____" ] ], [ [ "### Demonstration of bar() on NAO index data", "_____no_output_____" ], [ "Bar plots are created in much the same way as line plots, with two arrays of equal size.\n\nHere we use `bar()` to plot the data on North Atlantic oscillation from the NWS Climate Prediction Center. \n\nData source: http://www.cpc.ncep.noaa.gov/products/precip/CWlink/pna/nao.shtml\n\nVariable: monthly mean NAO index since January 1950 til March 2019.\n\nData stored in text file in the following way:\n\nYear | Month | Value\n\n1950 1 0.92000E+00\n\n# Read NAO data\n", "_____no_output_____" ] ], [ [ "nao_yr, nao_mn, nao_val = np.loadtxt('../../data/nao_monthly.txt', unpack=True)", "_____no_output_____" ], [ "# Quick look at the data\nfig, ax = plt.subplots(figsize=(25, 5))\nax.plot(nao_val);", "_____no_output_____" ] ], [ [ "Let's focus on the last 5 years and slice `nao_yr`, `nao_mn`, `nao_val` arrays accordingly.", "_____no_output_____" ] ], [ [ "# Slicing\nnao_yr_sub = nao_yr[-12*5:]\nnao_mn_sub = nao_mn[-12*5:]\nnao_val_sub = nao_val[-12*5:]", "_____no_output_____" ], [ "# Create an array of month numbers\nnao_time = np.arange(len(nao_val_sub))\nnao_time", "_____no_output_____" ], [ "# Plot bar\nfig, ax = plt.subplots(figsize=(15,4))\nax.bar(nao_time, nao_val_sub)\nax.set_title('NAO index')\nax.grid(True)", "_____no_output_____" ] ], [ [ "### Scatter plots", "_____no_output_____" ], [ "* display data as a collection of points, each having the value of one variable determining the position on the horizontal axis and the value of the other variable determining the position on the vertical axis\n* colorcode the data points to display an additional variable\n* good for non-gridded data", "_____no_output_____" ], [ "`scatter(x, y, s=None, c=None, marker=None, cmap=None, norm=None, vmin=None, vmax=None, alpha=None, edgecolors=None, **kwargs)`", "_____no_output_____" ] ], [ [ "# Generate some data (circles of random diameter)\nN = 50\nx = np.random.rand(N)\ny = np.random.rand(N)\narea = np.pi*(15*np.random.rand(N))**2 # 0 to 15 point radii\ncolors = np.random.rand(N)", "_____no_output_____" ], [ "# Plot scatter\nplt.scatter(x, y, s=area, c=colors);", "_____no_output_____" ] ], [ [ "### Multiple subplots", "_____no_output_____" ], [ "`plt.subplots()` is a function that creates a figure and a grid of subplots with a single call, while providing reasonable control over how the individual plots are created. 
", "_____no_output_____" ] ], [ [ "fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(15,5)) # or plt.subplots(2,3,figsize=(15,5))\naxes[0,0].set_title('subplot[0,0]', fontsize=18);\naxes[0,1].set_title('subplot[0,1]', fontsize=18);\naxes[0,2].set_title('subplot[0,2]', fontsize=18);\naxes[1,0].set_title('subplot[1,0]', fontsize=18);\naxes[1,1].set_title('subplot[1,1]', fontsize=18);\naxes[1,2].set_title('subplot[1,2]', fontsize=18);\n\nfor ax in axes.flat: # you can loop over axes\n ax.set_xticks([]);\n ax.set_yticks([]);", "_____no_output_____" ] ], [ [ "### Subplots with real data", "_____no_output_____" ], [ "To practice our plotting we are going to work with data from the NOAA ESRL Carbon Cycle Cooperative Global Air Sampling Network.\n\nSource: https://www.esrl.noaa.gov/gmd/dv/data/\n\nMonthly averages of atmospheric carbon dioxide ($CO_2$) and methane ($CH_4$) \n\nStations: \n* CGO = Cape Grim, Tasmania, Australia\n* MHD = Mace Head, County Galway, Ireland\n\nUnits:\n* $CO_2$ - ppm\n* $CH_4$ - ppb\n\nData stored in a text file. The top row states the number of header lines in the file. No title headers. The actual data is ogranized as following:\n\n|Station code | Year | Month | Measurement|\n| :------------- | :----------: | :----------| :---------- |\n|CGO | 1984 | 4 | 341.63 |", "_____no_output_____" ], [ "#### Read data from a text file", "_____no_output_____" ], [ "The simplest way to load data from a text file in `numpy` is to use `np.loadtxt()` function.", "_____no_output_____" ] ], [ [ "# np.loadtxt() # hit Shift+Tab+Tab", "_____no_output_____" ] ], [ [ "This function has a lot parameters that you can adjuct to fit your data format. Here we use only:\n\n`np.loadtxt(fname, skiprows=..., usecols=..., unpack=...)`", "_____no_output_____" ] ], [ [ "data = np.loadtxt('../../data/co2_cgo_surface-flask_1_ccgg_month.txt', skiprows=68, usecols=(1, 2, 3))\ndata", "_____no_output_____" ] ], [ [ "If we want to have three separate arrays for year, month and value, we can set `unpack=True` and store the output from `np.loadtxt()` function in three separate arrays.", "_____no_output_____" ] ], [ [ "year, month, value = np.loadtxt('../../data/co2_cgo_surface-flask_1_ccgg_month.txt', skiprows=68, usecols=(1, 2, 3), unpack=True)", "_____no_output_____" ], [ "year[0:8]", "_____no_output_____" ], [ "month[0:8]", "_____no_output_____" ], [ "value[0:8]", "_____no_output_____" ] ], [ [ "#### Kwargs", "_____no_output_____" ], [ "* remember from yesterday, you can store any number of keyword arguments in a dictionary, and later unpack it when calling a function", "_____no_output_____" ] ], [ [ "# Kwargs\nread_data_kwargs = dict(skiprows=68, usecols=(1, 2, 3), unpack=True)", "_____no_output_____" ], [ "# Read data\n# CO2 \ncgo_co2_yr, cgo_co2_mn, cgo_co2_val = np.loadtxt('../../data/co2_cgo_surface-flask_1_ccgg_month.txt', **read_data_kwargs)\nmhd_co2_yr, mhd_co2_mn, mhd_co2_val = np.loadtxt('../../data/co2_mhd_surface-flask_1_ccgg_month.txt', **read_data_kwargs)\n# CH4\ncgo_ch4_yr, cgo_ch4_mn, cgo_ch4_val = np.loadtxt('../../data/ch4_cgo_surface-flask_1_ccgg_month.txt', **read_data_kwargs)\nmhd_ch4_yr, mhd_ch4_mn, mhd_ch4_val = np.loadtxt('../../data/ch4_mhd_surface-flask_1_ccgg_month.txt', **read_data_kwargs)\n", "_____no_output_____" ] ], [ [ "We'll find out how to properly plot on a time axis soon! 
For now, create dummy time arrays by some arithmetic to numpy arrays.", "_____no_output_____" ] ], [ [ "cgo_co2_time_dummy = cgo_co2_yr*12 + cgo_co2_mn\nmhd_co2_time_dummy = mhd_co2_yr*12 + mhd_co2_mn\ncgo_ch4_time_dummy = cgo_ch4_yr*12 + cgo_ch4_mn\nmhd_ch4_time_dummy = mhd_ch4_yr*12 + mhd_ch4_mn", "_____no_output_____" ] ], [ [ "**Exercise 4a (20 mins)**. Construct two subplots using the arrays created above. Add titles, x and y labels, legend. If you have time, play with optional arguments of `plot()` and try to use **kwargs**.", "_____no_output_____" ], [ "The desired outcome is something like this (time on x axis will follow in part b):\n\n<img src=\"../../figures/subplots_example.png\">", "_____no_output_____" ] ], [ [ "# Your code here", "_____no_output_____" ], [ "# Solution\n# plt.rcParams['mathtext.default'] = 'regular'\ncgo_kwargs = dict(label='Cape Grim', color='C3', linestyle='-')\nmhd_kwargs = dict(label='Mace Head', color='C7', linestyle='-')\n\nfig, axes = plt.subplots(nrows=2, figsize=(9,9), sharex=True)\n\naxes[0].plot(cgo_co2_time_dummy, cgo_co2_val, **cgo_kwargs)\naxes[1].plot(cgo_ch4_time_dummy, cgo_ch4_val, **cgo_kwargs)\n\naxes[0].plot(mhd_co2_time_dummy, mhd_co2_val, **mhd_kwargs)\naxes[1].plot(mhd_ch4_time_dummy, mhd_ch4_val, **mhd_kwargs)\n\naxes[0].set_title('$CO_{2}$')\naxes[1].set_title('$CH_{4}$')\n\naxes[0].set_ylabel('ppm')\naxes[1].set_ylabel('ppb')\n\naxes[0].legend();\n#fig.savefig('../../figures/subplots_example.png',bbox_inches='tight')", "_____no_output_____" ] ], [ [ "#### Datetime\n\n* `datetime` module helps to work with time arrays", "_____no_output_____" ] ], [ [ "from datetime import datetime", "_____no_output_____" ], [ "datetime.now()", "_____no_output_____" ], [ "a_date = datetime(2019, 5, 23)", "_____no_output_____" ], [ "a_date", "_____no_output_____" ], [ "python_course_dates = [datetime(2019, 5, i) for i in [22, 23, 24]]", "_____no_output_____" ], [ "python_course_dates", "_____no_output_____" ] ], [ [ "Let's apply it to our arrays.", "_____no_output_____" ] ], [ [ "# Using list comprehension\ncgo_co2_time = [datetime(int(i), int(j), 1) for i, j in zip(cgo_co2_yr, cgo_co2_mn)]", "_____no_output_____" ], [ "# Same as in previous cell but using a for loop\ncgo_co2_time = []\nfor i, j in zip(cgo_co2_yr, cgo_co2_mn):\n cgo_co2_time.append(datetime(int(i), int(j), 1))", "_____no_output_____" ], [ "mhd_co2_time = [datetime(int(i), int(j), 1) for i, j in zip(mhd_co2_yr, mhd_co2_mn)]\n\n\n\ncgo_ch4_time = [datetime(int(i), int(j), 1) for i, j in zip(cgo_ch4_yr, cgo_ch4_mn)]\nmhd_ch4_time = [datetime(int(i), int(j), 1) for i, j in zip(mhd_ch4_yr, mhd_ch4_mn)]\n", "_____no_output_____" ] ], [ [ "<b>Exercise 4b.</b> Improve your solution to exercise 4a by using the newly created datetime arrays. Note how matplotlib understands the datetime format. 
", "_____no_output_____" ] ], [ [ "# Solution\n# plt.rcParams['mathtext.default'] = 'regular'\n\ncgo_kwargs = dict(label='Cape Grim', color='C3', linestyle='-')\nmhd_kwargs = dict(label='Mace Head', color='C7', linestyle='-')\n\nfig, axes = plt.subplots(nrows=2, figsize=(9,9), sharex=True)\n\naxes[0].plot(cgo_co2_time, cgo_co2_val, **cgo_kwargs)\naxes[1].plot(cgo_ch4_time, cgo_ch4_val, **cgo_kwargs)\n\naxes[0].plot(mhd_co2_time, mhd_co2_val, **mhd_kwargs)\naxes[1].plot(mhd_ch4_time, mhd_ch4_val, **mhd_kwargs)\n\naxes[0].set_title('$CO_{2}$')\naxes[1].set_title('$CH_{4}$')\n\naxes[0].set_ylabel('ppm')\naxes[1].set_ylabel('ppb')\n\naxes[0].legend();\n#fig.savefig('../../figures/subplots_example.png',bbox_inches='tight')", "_____no_output_____" ] ], [ [ "---\n---\n\n## Plotting 2D data: contour (and contourf) plots", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np", "_____no_output_____" ] ], [ [ "* `contour()` and `contourf()` draw contour lines and filled contours, respectively\n* good for 2D gridded data\n\n** Note: `contourf()` differs from the Matlab version in that it does not draw the polygon edges. To draw edges, add line contours with calls to `contour()`.", "_____no_output_____" ], [ "`contour(Z)` - make a contour plot of an array Z. The level values are chosen automatically, axes will refer to indices in the array.\n\n`contour(X, Y, Z)` - X, Y specify the (x, y) coordinates of the surface\n\n`contour(X, Y, Z, N)` - contour up to N automatically-chosen levels\n\n`contour(X, Y, Z, [level1, level2])` - contour on specific levels, e.g. level1, level2. ", "_____no_output_____" ] ], [ [ "# Let's create a function to generate some data\ndef fun(x,y):\n return (1-x/2+x**5+y**3)*np.exp(-x**2-y**2) ", "_____no_output_____" ], [ "# Create a regular (x,y) grid\nn = 200\nx1d = np.linspace(-3,3,n)\ny1d = np.linspace(-3,3,n)\nX, Y = np.meshgrid(x1d, y1d) # repeat x y times and y x times", "_____no_output_____" ], [ "# Calculate the data\ndata = fun(X,Y)", "_____no_output_____" ], [ "# A simple example first: \nplt.contour(data);\n#plt.contour(X, Y, data);", "_____no_output_____" ], [ "# Plot subplots using contour and contourf\nfig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 4))\nax1.contour(X, Y, data, 10);\nax2.contourf(X, Y, data, 10);\n#ax3.contour(X, Y, data, 10, colors='k');\n#ax3.contourf(X, Y, data, 10);", "_____no_output_____" ] ], [ [ "### How to add a colorbar?", "_____no_output_____" ], [ "When adding a **colorbar**, it needs to know the relevant *axes* and *mappable* content - especially when working with subplots or layered figures. \n\nNote that a colorbar will also have its own axes properties... \n\nWe tell matplotlib which plotted values to use for the colorbar content with: `fig.colorbar(mappable, ax=ax_no)`", "_____no_output_____" ] ], [ [ "# Plot contour and contourf with colorbars\n# By default matplotlib contours negative values with a dashed line. 
This behaviour can be changed with rcparams:\n#plt.rcParams['contour.negative_linestyle']= 'solid' # Reset to default with `= 'dashed'`\n\nfig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 4))\nax1.contour(X, Y, data, 10)\nmappable2 = ax2.contourf(X, Y, data, 10)\n#mappable2.set_clim(0,1)\n\nax3.contour(X, Y, data, 10, colors='k')\nmappable3 = ax3.contourf(X, Y, data, 10)\n\nfig.colorbar(mappable2, ax=ax2)\nfig.colorbar(mappable3, ax=ax3);", "_____no_output_____" ] ], [ [ "#### Mini exercise: 10 min\n\nPlay around with the lines of code in the cell above, and see how the figure changes, e.g. \n\n* What happens if you try to add a colorbar to ax1? \n\n *Answer: Lines appear rather than blocks of colour.* \n\n* Try plotting chosen contour levels for ax2 or ax3, and see what happens to the colorbar? \n\n *Answer: The max/min levels will set the limits of your colorbar. Values outside these limits will appear white unless you specify `extend='max'`, `'min'` or `'both'`.* \n", "_____no_output_____" ] ], [ [ "# Examples: \nfig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 4))\nmappable1 = ax1.contour(X, Y, data, 10)\nmappable2 = ax2.contourf(X, Y, data, levels=np.linspace(-0.5,0.5,11))\n#mappable2.set_clim(0,1)\n\nax3.contour(X, Y, data, 10, colors='k')\nmappable3 = ax3.contourf(X, Y, data, 10, levels=np.linspace(-0.5,0.5,11), extend='both')\n\nfig.colorbar(mappable1, ax=ax1)\nfig.colorbar(mappable2, ax=ax2)\nfig.colorbar(mappable3, ax=ax3);", "_____no_output_____" ] ], [ [ "---\n\n## Final matplotlib exercise (20 mins)", "_____no_output_____" ], [ "Reproduce the figure below by using `contourf()` to show a map of sea surface temperature, and `plot()` for a zonally averaged temperature curve. \n\nThe code for loading and processing the data is provided below, so you can focus on producing the figure...\n", "_____no_output_____" ], [ "Data source: https://podaac-tools.jpl.nasa.gov/las/UI.vm\n\nDataset: AMSR-E Level 3 Sea Surface Temperature for Climate Model Comparison.\n\nVariable: Sea Surface Temperature (K).\n\nTime : 16-JUN-2002 00:00.\n\nSpacial resolution: 1$^{\\circ}$x1$^{\\circ}$, 361 by 180 points (longitude by latitude).\n\nTotal Number of Records: 64980.", "_____no_output_____" ], [ "All the data processing is handled for you here, with the following steps: \n* Read the data using `np.genfromtxt` (very similar to `np.loadtxt`, but can handle missing values). \n* Reshape the 1D data into a 2D lat-lon grid. \n* Calculate the zonal-mean temperature. 
", "_____no_output_____" ] ], [ [ "# Read modelling sst data\nlon_raw, lat_raw, sst_raw = np.genfromtxt('../../data/AMSR-E_Level_3_Sea_Surface_Temperature_for_Climate_Model_Comparison.csv', \n delimiter=',', skip_header=10, missing_values='-1.E+34', \n usemask=True, usecols=(2, 3, 4), unpack=True)\n\n# Reshape into a grid of sst with corresponding lat and lon coordinates\nlon = np.unique(lon_raw)\nlat = np.unique(lat_raw)\nsst = np.reshape(sst_raw,(len(lat),len(lon)))\n\n# Calculate the zonal-mean temperature here\ntemp_zonal_mean = np.nanmean(sst,1);", "_____no_output_____" ] ], [ [ "Now, plot the data...", "_____no_output_____" ] ], [ [ "# Example Solution\n# == Increasing the font size to improve readability ==\nplt.rcParams.update({\"font.size\": 20})\n\n# == set the subplot layout ==\nfig, ax = plt.subplots(1, 2, figsize=(12,8), \n gridspec_kw={\"width_ratios\":[8, 1]}, sharey = True)\nfig.subplots_adjust(wspace=0.05)\n\n# plot the map on axis 0\ncb = ax[0].contourf(lon, lat, sst, cmap='inferno')\nax[0].set_title('Sea surface temperature 16 June 2002')\nax[0].set(xlabel='Longitude', ylabel='Latitude', ylim = [-80,80])\n\n# plot the zonal mean on axis 1\nax[1].plot(temp_zonal_mean, lat)\nax[1].set(xlabel='Mean temp')\n\n# create a separate whole-width axis (cbar_ax) for the colorbar at the bottom\n# -> set position relative to other axes.\nfig.subplots_adjust(bottom=0.2)\npos0 = ax[0].get_position() # [x0=left, y0=bottom, x1=right, y1=top]\npos1 = ax[1].get_position()\ncbar_ax = fig.add_axes([pos0.x0, 0.08, pos1.x1-pos0.x0, 0.03]) # [left, bottom, width, height]\nfig.colorbar(cb, cax=cbar_ax, label=r\"Temperature (K)\", orientation='horizontal');\n\n#fig.savefig('../../figures/matplotlib_map.png', dpi=300, bbox_inches='tight')", "_____no_output_____" ] ], [ [ "**NB. There is no \"right\" answer - you may find other ways to produce the same figure (or something better!)** ", "_____no_output_____" ], [ "## References: \n* https://matplotlib.org/faq/usage_faq.html\n* http://www.labri.fr/perso/nrougier/teaching/matplotlib/matplotlib.html\n* https://matplotlib.org/stable/index.html\n * https://matplotlib.org/stable/gallery/index.html\n* https://github.com/matplotlib/cheatsheets", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
c52ccf977ff502cd7bb7d295574ca7184075ec60
23237
ipynb
Jupyter Notebook
4-Logistic2DGradient.ipynb
Ursinus-CS477-F2021/Week10_LogisticRegression
010b07b58f8b1e78f83d28677dd06668778b1a85
[ "Apache-2.0" ]
null
null
null
4-Logistic2DGradient.ipynb
Ursinus-CS477-F2021/Week10_LogisticRegression
010b07b58f8b1e78f83d28677dd06668778b1a85
[ "Apache-2.0" ]
null
null
null
4-Logistic2DGradient.ipynb
Ursinus-CS477-F2021/Week10_LogisticRegression
010b07b58f8b1e78f83d28677dd06668778b1a85
[ "Apache-2.0" ]
null
null
null
122.94709
11776
0.84955
[ [ [ "import numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "logistic = lambda x: 1/(1+np.exp(-x))\n\nnp.random.seed(0)\nX1 = np.random.randn(20, 2)+np.array([4, 4])\nX2 = np.random.randn(20, 2)+np.array([1, 2])\n\nplt.scatter(X1[:, 0], X1[:, 1])\nplt.scatter(X2[:, 0], X2[:, 1])\nplt.axis(\"equal\")\n\ndef get_logistic_loss(X1, X2, a, b, c):\n return np.sum(logistic(a*X1[:, 0]+b*X1[:, 1] + c)**2) + np.sum((1-logistic(a*X2[:, 0]+b*X2[:, 1] + c))**2)\n\ndef plot_logistic_regression_predictions(X1, X2, a, b, c):\n plt.scatter(X1[:, 0], X1[:, 1])\n plt.scatter(X2[:, 0], X2[:, 1])\n X = np.concatenate((X1, X2), axis=0)\n xmin = np.min(X, axis=0)\n xmax = np.max(X, axis=0)\n iv = max(xmax[1]-xmin[1], xmax[0]-xmin[0])\n \n p0 = -c*np.array([a, b])/(a**2 + b**2)\n v = np.array([-b, a])\n mag = np.sqrt(np.sum(v**2))\n if mag > 0:\n v = v/mag\n p = p0 - 2*iv*v\n q = p0 + 2*iv*v\n plt.plot([p[0], q[0]], [p[1], q[1]])\n rg = xmax[0] - xmin[0]\n plt.xlim([xmin[0]-0.2*rg, xmax[0]+0.2*rg])\n rg = xmax[1] - xmin[1]\n plt.ylim([xmin[1]-0.2*rg, xmax[1]+0.2*rg])\n\n wrong = 0\n for x in X1:\n y = logistic(a*x[0] + b*x[1] + c)\n proj = p0 + np.sum(v*(x-p0))*v\n plt.plot([x[0], proj[0]], [x[1], proj[1]], c='C0')\n if y > 0.5:\n plt.scatter([x[0]], [x[1]], 200, c='C0', marker='x')\n wrong += 1\n for x in X2:\n y = logistic(a*x[0] + b*x[1] + c)\n proj = p0 + np.sum(v*(x-p0))*v\n plt.plot([x[0], proj[0]], [x[1], proj[1]], c='C1')\n if y < 0.5:\n plt.scatter([x[0]], [x[1]], 200, c='C1', marker='x')\n wrong += 1\n loss = get_logistic_loss(X1, X2, a, b, c)\n N = X.shape[0]\n plt.title(\"a = {:.3f}, b = {:.3f}, c = {:.3f}\\nLoss = {:.3f}, {} Wrong ({} % Accuracy)\".format(a, b, c, loss, wrong, int(100*(N-wrong)/N)))\n plt.axis(\"equal\")", "_____no_output_____" ], [ "losses = []\nsteps = []\nstep = 0.01\nn_iters = 150\na = 0\nb = 0\nc = 0\nX = np.concatenate((X1, X2))\ny = np.concatenate((np.zeros(X1.shape[0]), np.ones(X2.shape[0])))\nplt.figure(figsize=(10, 5))\nfor it in range(n_iters):\n ## TODO: Update a, b, and c with Gradient descent\n f = logistic(a*X[:, 0] + b*X[:, 1] + c)\n ## TODO: Fill this in to perform gradient descent on a, b, and c\n steps.append([a, b])\n loss = get_logistic_loss(X1, X2, a, b, c)\n losses.append(loss)\n \nplt.clf()\nplt.subplot(121)\nplot_logistic_regression_predictions(X1, X2, a, b, c)\nplt.xlim([-1, 7])\nplt.ylim([-1, 7])\nplt.subplot(122)\nplt.plot(losses)\nplt.xlim([0, n_iters])\nplt.ylim([0, np.max(losses)])\nplt.title(\"Loss\")\nplt.xlabel(\"Iteration\")", "<ipython-input-2-d393a34b1ef4>:22: RuntimeWarning: invalid value encountered in true_divide\n p0 = -c*np.array([a, b])/(a**2 + b**2)\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
c52cd26d4a6d368661a93264fedf43e23e507b90
10969
ipynb
Jupyter Notebook
tasks/AnatomyOfMatplotlib/AnatomyOfMatplotlib-Part5-Artists.ipynb
shmalex/galvanize
9f3e0b1a66eed23ace5839f2c29d3627bbc2120e
[ "MIT" ]
1
2020-12-16T09:36:23.000Z
2020-12-16T09:36:23.000Z
tasks/AnatomyOfMatplotlib/AnatomyOfMatplotlib-Part5-Artists.ipynb
shmalex/galvanize
9f3e0b1a66eed23ace5839f2c29d3627bbc2120e
[ "MIT" ]
null
null
null
tasks/AnatomyOfMatplotlib/AnatomyOfMatplotlib-Part5-Artists.ipynb
shmalex/galvanize
9f3e0b1a66eed23ace5839f2c29d3627bbc2120e
[ "MIT" ]
null
null
null
33.039157
645
0.560489
[ [ [ "# Let printing work the same in Python 2 and 3\nfrom __future__ import print_function\n# Turning on inline plots -- just for use in ipython notebooks.\nimport matplotlib\nmatplotlib.use('nbagg')\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "# Artists\nAnything that can be displayed in a Figure is an [`Artist`](http://matplotlib.org/users/artists.html). There are two main classes of Artists: primatives and containers. Below is a sample of these primitives.", "_____no_output_____" ] ], [ [ "\"\"\"\nShow examples of matplotlib artists\nhttp://matplotlib.org/api/artist_api.html\n\nSeveral examples of standard matplotlib graphics primitives (artists)\nare drawn using matplotlib API. Full list of artists and the\ndocumentation is available at\nhttp://matplotlib.org/api/artist_api.html\n\nCopyright (c) 2010, Bartosz Telenczuk\n\nLicense: This work is licensed under the BSD. A copy should be\nincluded with this source code, and is also available at\nhttp://www.opensource.org/licenses/bsd-license.php\n\"\"\"\n\nfrom matplotlib.collections import PatchCollection\nimport matplotlib.path as mpath\nimport matplotlib.patches as mpatches\nimport matplotlib.lines as mlines\n\nfig, ax = plt.subplots(1, 1, figsize=(7,7))\n\n# create 3x3 grid to plot the artists\npos = np.mgrid[0.2:0.8:3j, 0.2:0.8:3j].reshape(2, -1)\npatches = []\n\n# add a circle\nart = mpatches.Circle(pos[:, 0], 0.1, ec=\"none\")\npatches.append(art)\nplt.text(pos[0, 0], pos[1, 0] - 0.15, \"Circle\", ha=\"center\", size=14)\n\n# add a rectangle\nart = mpatches.Rectangle(pos[:, 1] - [0.025, 0.05], 0.05, 0.1, ec=\"none\")\npatches.append(art)\nplt.text(pos[0, 1], pos[1, 1] - 0.15, \"Rectangle\", ha=\"center\", size=14)\n\n# add a wedge\nwedge = mpatches.Wedge(pos[:, 2], 0.1, 30, 270, ec=\"none\")\npatches.append(wedge)\nplt.text(pos[0, 2], pos[1, 2] - 0.15, \"Wedge\", ha=\"center\", size=14)\n\n# add a Polygon\npolygon = mpatches.RegularPolygon(pos[:, 3], 5, 0.1)\npatches.append(polygon)\nplt.text(pos[0, 3], pos[1, 3] - 0.15, \"Polygon\", ha=\"center\", size=14)\n\n#add an ellipse\nellipse = mpatches.Ellipse(pos[:, 4], 0.2, 0.1)\npatches.append(ellipse)\nplt.text(pos[0, 4], pos[1, 4] - 0.15, \"Ellipse\", ha=\"center\", size=14)\n\n#add an arrow\narrow = mpatches.Arrow(pos[0, 5] - 0.05, pos[1, 5] - 0.05, 0.1, 0.1, width=0.1)\npatches.append(arrow)\nplt.text(pos[0, 5], pos[1, 5] - 0.15, \"Arrow\", ha=\"center\", size=14)\n\n# add a path patch\nPath = mpath.Path\nverts = np.array([\n (0.158, -0.257),\n (0.035, -0.11),\n (-0.175, 0.20),\n (0.0375, 0.20),\n (0.085, 0.115),\n (0.22, 0.32),\n (0.3, 0.005),\n (0.20, -0.05),\n (0.158, -0.257),\n ])\nverts = verts - verts.mean(0)\ncodes = [Path.MOVETO,\n Path.CURVE4, Path.CURVE4, Path.CURVE4, Path.LINETO,\n Path.CURVE4, Path.CURVE4, Path.CURVE4, Path.CLOSEPOLY]\n\npath = mpath.Path(verts / 2.5 + pos[:, 6], codes)\npatch = mpatches.PathPatch(path)\npatches.append(patch)\nplt.text(pos[0, 6], pos[1, 6] - 0.15, \"PathPatch\", ha=\"center\", size=14)\n\n# add a fancy box\nfancybox = mpatches.FancyBboxPatch(\n pos[:, 7] - [0.025, 0.05], 0.05, 0.1,\n boxstyle=mpatches.BoxStyle(\"Round\", pad=0.02))\npatches.append(fancybox)\nplt.text(pos[0, 7], pos[1, 7] - 0.15, \"FancyBoxPatch\", ha=\"center\", size=14)\n\n# add a line\nx,y = np.array([[-0.06, 0.0, 0.1], [0.05,-0.05, 0.05]])\nline = mlines.Line2D(x+pos[0, 8], y+pos[1, 8], lw=5.)\nplt.text(pos[0, 8], pos[1, 8] - 0.15, \"Line2D\", ha=\"center\", size=14)\n\ncollection = 
PatchCollection(patches)\nax.add_collection(collection)\nax.add_line(line)\nax.set_axis_off()\n\nplt.show()", "_____no_output_____" ] ], [ [ "Containers are objects like *Figure* and *Axes*. Containers are given primitives to draw. The plotting functions we discussed back in Parts 1 & 2 are convenience functions that generate these primitives and places them into the appropriate containers. In fact, most of those functions will return artist objects (or a list of artist objects) as well as store them into the appropriate axes container.\n\nAs discussed in Part 3, there is a wide range of properties that can be defined for your plots. These properties are processed and applied to their primitives. Ultimately, you can override anything you want just by directly setting a property to the object itself.", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(1, 1)\nlines = plt.plot([1, 2, 3, 4], [1, 2, 3, 4], 'b', [1, 2, 3, 4], [4, 3, 2, 1], 'r')\nlines[0].set(linewidth=5)\nlines[1].set(linewidth=10, alpha=0.7)\nplt.show()", "_____no_output_____" ] ], [ [ "To see what properties are set for an artist, use [`getp()`](http://matplotlib.org/api/artist_api.html#matplotlib.artist.getp)", "_____no_output_____" ] ], [ [ "fig = plt.figure()\nprint(plt.getp(fig.patch))\nplt.close(fig)", "_____no_output_____" ] ], [ [ "# Collections\nIn addition to the Figure and Axes containers, there is another special type of container called a [`Collection`](http://matplotlib.org/api/collections_api.html). A Collection usually contains a list of primitives of the same kind that should all be treated similiarly. For example, a [`CircleCollection`](http://matplotlib.org/api/collections_api.html#matplotlib.collections.CircleCollection) would have a list of [`Circle`](http://matplotlib.org/api/artist_api.html#matplotlib.patches.Circle) objects all with the same color, size, and edge width. Individual property values for artists in the collection can also be set (in some cases).", "_____no_output_____" ] ], [ [ "from matplotlib.collections import LineCollection\nfig, ax = plt.subplots(1, 1)\n# A collection of 3 lines\nlc = LineCollection([[(4, 10), (16, 10)],\n [(2, 2), (10, 15), (6, 7)],\n [(14, 3), (1, 1), (3, 5)]])\nlc.set_color('r')\nlc.set_linewidth(5)\nax.add_collection(lc)\nax.set_xlim(0, 18)\nax.set_ylim(0, 18)\nplt.show()", "_____no_output_____" ], [ "# Now set individual properties in a collection\nfig, ax = plt.subplots(1, 1)\nlc = LineCollection([[(4, 10), (16, 10)],\n [(2, 2), (10, 15), (6, 7)],\n [(14, 3), (1, 1), (3, 5)]])\nlc.set_color(['r', 'blue', (0.2, 0.9, 0.3)])\nlc.set_linewidth([4, 3, 6])\nax.add_collection(lc)\nax.set_xlim(0, 18)\nax.set_ylim(0, 18)\nplt.show()", "_____no_output_____" ] ], [ [ "There are other kinds of collections that are not just simply a list of primitives, but are Artists in their own right. These special kinds of collections take advantage of various optimizations that can be assumed when rendering similar or identical things. You use these collections all the time whether you realize it or not! 
Markers are implemented this way (so, whenever you do `plot()` or `scatter()`, for example).", "_____no_output_____" ] ], [ [ "from matplotlib.collections import RegularPolyCollection\n\nfig, ax = plt.subplots(1, 1)\noffsets = np.random.rand(20, 2)\ncollection = RegularPolyCollection(\n numsides=5, # a pentagon\n sizes=(150,),\n offsets=offsets,\n transOffset=ax.transData,\n )\nax.add_collection(collection)\nplt.show()", "_____no_output_____" ] ], [ [ "## Exercise 5.1\nGive yourselves 4 gold stars!\n\nHint: [StarPolygonCollection](http://matplotlib.org/api/collections_api.html#matplotlib.collections.StarPolygonCollection)", "_____no_output_____" ] ], [ [ "%load exercises/5.1-goldstar.py", "_____no_output_____" ], [ "from matplotlib.collections import StarPolygonCollection\n\nfig, ax = plt.subplots(1, 1)\n\ncollection = StarPolygonCollection(5,\n offsets=[(0.5, 0.5)],\n transOffset=ax.transData)\nax.add_collection(collection)\nplt.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
c52cd7c3ac46cd1f3d410cee8b947186403e0012
78202
ipynb
Jupyter Notebook
nbs/12_optimizer.ipynb
richarddwang/fastai
93b995d69b10ed25f4d7d155206871ad5e19cd51
[ "Apache-2.0" ]
null
null
null
nbs/12_optimizer.ipynb
richarddwang/fastai
93b995d69b10ed25f4d7d155206871ad5e19cd51
[ "Apache-2.0" ]
null
null
null
nbs/12_optimizer.ipynb
richarddwang/fastai
93b995d69b10ed25f4d7d155206871ad5e19cd51
[ "Apache-2.0" ]
null
null
null
39.179359
10808
0.610995
[ [ [ "# default_exp optimizer", "_____no_output_____" ], [ "#export\nfrom fastai.torch_basics import *", "_____no_output_____" ], [ "#hide\nfrom nbdev.showdoc import *", "_____no_output_____" ] ], [ [ "# Optimizer\n\n> Define the general fastai optimizer and the variants", "_____no_output_____" ], [ "## `_BaseOptimizer` -", "_____no_output_____" ] ], [ [ "#export\nclass _BaseOptimizer():\n \"Common functionality between `Optimizer` and `OptimWrapper`\"\n def all_params(self, n=slice(None), with_grad=False):\n res = L((p,pg,self.state[p],hyper) for pg,hyper in zip(self.param_lists[n],self.hypers[n]) for p in pg)\n return L(o for o in res if o[0].grad is not None) if with_grad else res\n\n def _set_require_grad(self, rg, p,pg,state,h): p.requires_grad_(rg or state.get('force_train', False))\n def freeze_to(self, n):\n self.frozen_idx = n if n >= 0 else len(self.param_lists) + n\n if self.frozen_idx >= len(self.param_lists):\n warn(f\"Freezing {self.frozen_idx} groups; model has {len(self.param_lists)}; whole model is frozen.\")\n for o in self.all_params(slice(n, None)): self._set_require_grad(True, *o)\n for o in self.all_params(slice(None, n)): self._set_require_grad(False, *o)\n\n def freeze(self):\n assert(len(self.param_lists)>1)\n self.freeze_to(-1)\n\n def set_freeze(self, n, rg, ignore_force_train=False):\n for p in self.param_lists[n]: p.requires_grad_(rg or (state.get('force_train', False) and not ignore_force_train))\n\n def unfreeze(self): self.freeze_to(0)\n def set_hypers(self, **kwargs): L(kwargs.items()).starmap(self.set_hyper)\n def _set_hyper(self, k, v):\n for v_,h in zip(v, self.hypers): h[k] = v_\n\n def set_hyper(self, k, v):\n if isinstance(v, slice):\n if v.start: v = even_mults(v.start, v.stop, len(self.param_lists))\n else: v = [v.stop/10]*(len(self.param_lists)-1) + [v.stop]\n v = L(v, use_list=None)\n if len(v)==1: v = v*len(self.param_lists)\n assert len(v) == len(self.hypers), f\"Trying to set {len(v)} values for {k} but there are {len(self.param_lists)} parameter groups.\"\n self._set_hyper(k, v)\n\n @property\n def param_groups(self): return [{**{'params': pg}, **hp} for pg,hp in zip(self.param_lists, self.hypers)]\n @param_groups.setter\n def param_groups(self, v):\n for pg,v_ in zip(self.param_lists,v): pg = v_['params']\n for hyper,v_ in zip(self.hypers,v):\n for k,t in v_.items():\n if k != 'params': hyper[k] = t", "_____no_output_____" ], [ "add_docs(_BaseOptimizer, \n all_params=\"List of param_groups, parameters, and hypers\",\n freeze_to=\"Freeze parameter groups up to `n`\",\n freeze=\"Freeze up to last parameter group\",\n set_freeze=\"Set `rg` for parameter group `n` only\",\n unfreeze=\"Unfreeze the entire model\",\n set_hypers=\"`set_hyper` for all `kwargs`\",\n set_hyper=\"Set the value(s) in `v` for hyper-parameter `k`\")", "_____no_output_____" ], [ "#export\ndef _update(state, new=None):\n if new is None: return state\n if isinstance(new, dict): state.update(new)\n return state", "_____no_output_____" ] ], [ [ "## `Optimizer` -", "_____no_output_____" ] ], [ [ "# export\n@log_args(but='params,cbs,defaults')\nclass Optimizer(_BaseOptimizer):\n \"Base optimizer class for the fastai library, updating `params` with `cbs`\"\n _keep_on_clear = ['force_train', 'do_wd']\n def __init__(self, params, cbs, train_bn=True, **defaults):\n params = L(params)\n self.cbs,self.state,self.train_bn = L(cbs),defaultdict(dict),train_bn\n defaults = merge(*self.cbs.attrgot('defaults'), defaults)\n self.param_lists = L(L(p) for p in params) if isinstance(params[0], 
(L,list)) else L([params])\n self.hypers = L({} for _ in range_of(self.param_lists))\n self.set_hypers(**defaults)\n self.frozen_idx = 0\n\n def zero_grad(self):\n for p,*_ in self.all_params(with_grad=True):\n p.grad.detach_()\n p.grad.zero_()\n\n def step(self):\n for p,pg,state,hyper in self.all_params(with_grad=True):\n for cb in self.cbs: state = _update(state, cb(p, **{**state, **hyper}))\n self.state[p] = state\n\n def clear_state(self):\n for p,pg,state,hyper in self.all_params():\n self.state[p] = {k: state[k] for k in self._keep_on_clear if k in state}\n\n def state_dict(self):\n state = [self.state[p] for p,*_ in self.all_params()]\n return {'state': state, 'hypers': self.hypers}\n\n def load_state_dict(self, sd):\n assert len(sd[\"hypers\"]) == len(self.param_lists)\n assert len(sd[\"state\"]) == sum([len(pg) for pg in self.param_lists])\n self.hypers = sd['hypers']\n self.state = {p: s for p,s in zip(self.all_params().itemgot(0), sd['state'])}", "_____no_output_____" ], [ "add_docs(Optimizer, \n zero_grad=\"Standard PyTorch API: Zero all the grad attributes of the parameters\",\n step=\"Standard PyTorch API: Update the stats and execute the steppers in on all parameters that have a grad\",\n state_dict=\"Return the state of the optimizer in a dictionary\",\n load_state_dict=\"Load the content of `sd`\",\n clear_state=\"Reset the state of the optimizer\")", "_____no_output_____" ] ], [ [ "### Initializing an Optimizer", "_____no_output_____" ], [ "`params` will be used to create the `param_groups` of the optimizer. If it's a collection (or a generator) of parameters, it will be a `L` containing one `L` with all the parameters. To define multiple parameter groups `params` should be passed as a collection (or a generator) of `L`s.\n\n> Note: In PyTorch, <code>model.parameters()</code> returns a generator with all the parameters, that you can directly pass to <code>Optimizer</code>.", "_____no_output_____" ] ], [ [ "opt = Optimizer([1,2,3], noop)\ntest_eq(opt.param_lists, [[1,2,3]])\nopt = Optimizer(range(3), noop)\ntest_eq(opt.param_lists, [[0,1,2]])\nopt = Optimizer([[1,2],[3]], noop)\ntest_eq(opt.param_lists, [[1,2],[3]])\nopt = Optimizer(([o,o+1] for o in range(0,4,2)), noop)\ntest_eq(opt.param_lists, [[0,1],[2,3]])", "_____no_output_____" ] ], [ [ "`cbs` is a list of functions that will be composed when applying the step. For instance, you can compose a function making the SGD step, with another one applying weight decay. Additionally, each `cb` can have a `defaults` attribute that contains hyper-parameters and their default value. Those are all gathered at initialization, and new values can be passed to override those defaults with the `defaults` kwargs. The steppers will be called by `Optimizer.step` (which is the standard PyTorch name), and gradients can be cleared with `Optimizer.zero_grad` (also a standard PyTorch name).\n\nOnce the defaults have all been pulled off, they are copied as many times as there are `param_groups` and stored in `hypers`. To apply different hyper-parameters to different groups (differential learning rates, or no weight decay for certain layers for instance), you will need to adjust those values after the init. 
", "_____no_output_____" ] ], [ [ "def tst_arg(p, lr=0, **kwargs): return p\ntst_arg.defaults = dict(lr=1e-2)\n\ndef tst_arg2(p, lr2=0, **kwargs): return p\ntst_arg2.defaults = dict(lr2=1e-3)\n\ndef tst_arg3(p, mom=0, **kwargs): return p\ntst_arg3.defaults = dict(mom=0.9)\n\ndef tst_arg4(p, **kwargs): return p\n\nopt = Optimizer([1,2,3], [tst_arg,tst_arg2, tst_arg3])\ntest_eq(opt.hypers, [{'lr2': 1e-3, 'mom': 0.9, 'lr': 1e-2}])\nopt = Optimizer([1,2,3], tst_arg, lr=0.1)\ntest_eq(opt.hypers, [{'lr': 0.1}])\nopt = Optimizer([[1,2],[3]], tst_arg)\ntest_eq(opt.hypers, [{'lr': 1e-2}, {'lr': 1e-2}])\nopt = Optimizer([[1,2],[3]], tst_arg, lr=0.1)\ntest_eq(opt.hypers, [{'lr': 0.1}, {'lr': 0.1}])", "_____no_output_____" ] ], [ [ "For each hyper-parameter, you can pass a slice or a collection to set them, if there are multiple parameter groups. A slice will be converted to a log-uniform collection from its beginning to its end, or if it only has an end `e`, to a collection of as many values as there are parameter groups that are `...,e/10,e/10,e`.\n\nSetting an hyper-parameter with a collection that has a different number of elements than the optimizer has parameter groups will raise an error.", "_____no_output_____" ] ], [ [ "opt = Optimizer([[1,2],[3]], tst_arg, lr=[0.1,0.2])\ntest_eq(opt.hypers, [{'lr': 0.1}, {'lr': 0.2}])\nopt = Optimizer([[1,2],[3],[4]], tst_arg, lr=slice(1e-2))\ntest_eq(opt.hypers, [{'lr': 1e-3}, {'lr': 1e-3}, {'lr': 1e-2}])\nopt = Optimizer([[1,2],[3],[4]], tst_arg, lr=slice(1e-4,1e-2))\ntest_eq(opt.hypers, [{'lr': 1e-4}, {'lr': 1e-3}, {'lr': 1e-2}])\ntest_eq(opt.param_groups, [{'params': [1,2], 'lr': 1e-4}, {'params': [3], 'lr': 1e-3}, {'params': [4], 'lr': 1e-2}])\ntest_fail(lambda: Optimizer([[1,2],[3],[4]], tst_arg, lr=np.array([0.1,0.2])))", "_____no_output_____" ] ], [ [ "### Basic steppers", "_____no_output_____" ], [ "To be able to give examples of optimizer steps, we will need some steppers, like the following:", "_____no_output_____" ] ], [ [ "#export\ndef sgd_step(p, lr, **kwargs):\n p.data.add_(p.grad.data, alpha=-lr)", "_____no_output_____" ], [ "def tst_param(val, grad=None):\n \"Create a tensor with `val` and a gradient of `grad` for testing\"\n res = tensor([val]).float()\n res.grad = tensor([val/10 if grad is None else grad]).float()\n return res", "_____no_output_____" ], [ "p = tst_param(1., 0.1)\nsgd_step(p, 1.)\ntest_eq(p, tensor([0.9]))\ntest_eq(p.grad, tensor([0.1]))", "_____no_output_____" ], [ "#export\ndef weight_decay(p, lr, wd, do_wd=True, **kwargs):\n \"Weight decay as decaying `p` with `lr*wd`\"\n if do_wd and wd!=0: p.data.mul_(1 - lr*wd)\n\nweight_decay.defaults = dict(wd=0.)", "_____no_output_____" ], [ "p = tst_param(1., 0.1)\nweight_decay(p, 1., 0.1)\ntest_eq(p, tensor([0.9]))\ntest_eq(p.grad, tensor([0.1]))", "_____no_output_____" ], [ "#export\ndef l2_reg(p, lr, wd, do_wd=True, **kwargs):\n \"L2 regularization as adding `wd*p` to `p.grad`\"\n if do_wd and wd!=0: p.grad.data.add_(p.data, alpha=wd)\n\nl2_reg.defaults = dict(wd=0.)", "_____no_output_____" ], [ "p = tst_param(1., 0.1)\nl2_reg(p, 1., 0.1)\ntest_eq(p, tensor([1.]))\ntest_eq(p.grad, tensor([0.2]))", "_____no_output_____" ] ], [ [ "> Warning: Weight decay and L2 regularization is the same thing for basic SGD, but for more complex optimizers, they are very different.", "_____no_output_____" ], [ "### Making the step", "_____no_output_____" ] ], [ [ "show_doc(Optimizer.step)", "_____no_output_____" ] ], [ [ "This method will loop over all param groups, then all parameters for which 
`grad` is not None and call each function in `stepper`, passing it the parameter `p` with the hyper-parameters in the corresponding dict in `hypers`.", "_____no_output_____" ] ], [ [ "#test basic step\nr = L.range(4)\ndef tst_params(): return r.map(tst_param)\n\nparams = tst_params()\nopt = Optimizer(params, sgd_step, lr=0.1)\nopt.step()\ntest_close([p.item() for p in params], r.map(mul(0.99)))", "_____no_output_____" ], [ "#test two steps\nparams = tst_params()\nopt = Optimizer(params, [weight_decay, sgd_step], lr=0.1, wd=0.1)\nopt.step()\ntest_close([p.item() for p in params], r.map(mul(0.98)))", "_____no_output_____" ], [ "#test None gradients are ignored\nparams = tst_params()\nopt = Optimizer(params, sgd_step, lr=0.1)\nparams[-1].grad = None\nopt.step()\ntest_close([p.item() for p in params], [0., 0.99, 1.98, 3.])", "_____no_output_____" ], [ "#test discriminative lrs\nparams = tst_params()\nopt = Optimizer([params[:2], params[2:]], sgd_step, lr=0.1)\nopt.hypers[0]['lr'] = 0.01\nopt.step()\ntest_close([p.item() for p in params], [0., 0.999, 1.98, 2.97])", "_____no_output_____" ], [ "show_doc(Optimizer.zero_grad)", "_____no_output_____" ], [ "params = tst_params()\nopt = Optimizer(params, [weight_decay, sgd_step], lr=0.1, wd=0.1)\nopt.zero_grad()\n[test_eq(p.grad, tensor([0.])) for p in params];", "_____no_output_____" ] ], [ [ "Some of the `Optimizer` `cbs` can be functions updating the state associated with a parameter. That state can then be used by any stepper. The best example is a momentum calculation.", "_____no_output_____" ] ], [ [ "def tst_stat(p, **kwargs): \n s = kwargs.get('sum', torch.zeros_like(p)) + p.data\n return {'sum': s}\ntst_stat.defaults = {'mom': 0.9}\n\n#Test Optimizer init\nopt = Optimizer([1,2,3], tst_stat)\ntest_eq(opt.hypers, [{'mom': 0.9}])\nopt = Optimizer([1,2,3], tst_stat, mom=0.99)\ntest_eq(opt.hypers, [{'mom': 0.99}])\n\n#Test stat\nx = torch.randn(4,5)\nstate = tst_stat(x)\nassert 'sum' in state\ntest_eq(x, state['sum'])\nstate = tst_stat(x, **state)\ntest_eq(state['sum'], 2*x)", "_____no_output_____" ] ], [ [ "## Statistics", "_____no_output_____" ] ], [ [ "# export\ndef average_grad(p, mom, dampening=False, grad_avg=None, **kwargs):\n \"Keeps track of the avg grads of `p` in `state` with `mom`.\"\n if grad_avg is None: grad_avg = torch.zeros_like(p.grad.data)\n damp = 1-mom if dampening else 1.\n grad_avg.mul_(mom).add_(p.grad.data, alpha=damp)\n return {'grad_avg': grad_avg}\n\naverage_grad.defaults = dict(mom=0.9)", "_____no_output_____" ] ], [ [ "`dampening=False` gives the classical formula for momentum in SGD: \n```\nnew_val = old_val * mom + grad\n```\nwhereas `dampening=True` makes it an exponential moving average:\n```\nnew_val = old_val * mom + grad * (1-mom)\n```", "_____no_output_____" ] ], [ [ "p = tst_param([1,2,3], [4,5,6])\nstate = {}\nstate = average_grad(p, mom=0.9, **state)\ntest_eq(state['grad_avg'], p.grad)\nstate = average_grad(p, mom=0.9, **state)\ntest_eq(state['grad_avg'], p.grad * 1.9)\n\n#Test dampening\nstate = {}\nstate = average_grad(p, mom=0.9, dampening=True, **state)\ntest_eq(state['grad_avg'], 0.1*p.grad)\nstate = average_grad(p, mom=0.9, dampening=True, **state)\ntest_close(state['grad_avg'], (0.1*0.9+0.1)*p.grad)", "_____no_output_____" ], [ "# export\ndef average_sqr_grad(p, sqr_mom, dampening=True, sqr_avg=None, **kwargs):\n if sqr_avg is None: sqr_avg = torch.zeros_like(p.grad.data)\n damp = 1-sqr_mom if dampening else 1.\n sqr_avg.mul_(sqr_mom).addcmul_(p.grad.data, p.grad.data, value=damp)\n return 
{'sqr_avg': sqr_avg}\n\naverage_sqr_grad.defaults = dict(sqr_mom=0.99)", "_____no_output_____" ] ], [ [ "`dampening=False` gives the classical formula for momentum in SGD: \n```\nnew_val = old_val * mom + grad**2\n```\nwhereas `dampening=True` makes it an exponential moving average:\n```\nnew_val = old_val * mom + (grad**2) * (1-mom)\n```", "_____no_output_____" ] ], [ [ "p = tst_param([1,2,3], [4,5,6])\nstate = {}\nstate = average_sqr_grad(p, sqr_mom=0.99, dampening=False, **state)\ntest_eq(state['sqr_avg'], p.grad.pow(2))\nstate = average_sqr_grad(p, sqr_mom=0.99, dampening=False, **state)\ntest_eq(state['sqr_avg'], p.grad.pow(2) * 1.99)\n\n#Test dampening\nstate = {}\nstate = average_sqr_grad(p, sqr_mom=0.99, **state)\ntest_close(state['sqr_avg'], 0.01*p.grad.pow(2))\nstate = average_sqr_grad(p, sqr_mom=0.99, **state)\ntest_close(state['sqr_avg'], (0.01*0.99+0.01)*p.grad.pow(2))", "_____no_output_____" ] ], [ [ "### Freezing part of the model", "_____no_output_____" ] ], [ [ "show_doc(Optimizer.freeze, name=\"Optimizer.freeze\")", "_____no_output_____" ], [ "show_doc(Optimizer.freeze_to, name=\"Optimizer.freeze_to\")", "_____no_output_____" ], [ "show_doc(Optimizer.unfreeze, name=\"Optimizer.unfreeze\")", "_____no_output_____" ], [ "#Freezing the first layer\nparams = [tst_params(), tst_params(), tst_params()]\nopt = Optimizer(params, sgd_step, lr=0.1)\nopt.freeze_to(1)\nreq_grad = Self.requires_grad()\ntest_eq(L(params[0]).map(req_grad), [False]*4)\nfor i in {1,2}: test_eq(L(params[i]).map(req_grad), [True]*4)\n \n#Unfreezing\nopt.unfreeze()\nfor i in range(2): test_eq(L(params[i]).map(req_grad), [True]*4)\n\n#TODO: test warning\n# opt.freeze_to(3)", "_____no_output_____" ] ], [ [ "Parameters such as batchnorm weights/bias can be marked to always be in training mode, just put `force_train=true` in their state.", "_____no_output_____" ] ], [ [ "params = [tst_params(), tst_params(), tst_params()]\nopt = Optimizer(params, sgd_step, lr=0.1)\nfor p in L(params[1])[[1,3]]: opt.state[p] = {'force_train': True}\nopt.freeze()\ntest_eq(L(params[0]).map(req_grad), [False]*4)\ntest_eq(L(params[1]).map(req_grad), [False, True, False, True])\ntest_eq(L(params[2]).map(req_grad), [True]*4)", "_____no_output_____" ] ], [ [ "### Serializing", "_____no_output_____" ] ], [ [ "show_doc(Optimizer.state_dict)", "_____no_output_____" ], [ "show_doc(Optimizer.load_state_dict)", "_____no_output_____" ], [ "p = tst_param([1,2,3], [4,5,6])\nopt = Optimizer(p, average_grad)\nopt.step()\ntest_eq(opt.state[p]['grad_avg'], tensor([[4., 5., 6.]]))\n\nsd = opt.state_dict()\np1 = tst_param([10,20,30], [40,50,60])\nopt = Optimizer(p1, average_grad, mom=0.99)\ntest_eq(opt.hypers[0]['mom'], 0.99)\ntest_eq(opt.state, {})\n\nopt.load_state_dict(sd)\ntest_eq(opt.hypers[0]['mom'], 0.9)\ntest_eq(opt.state[p1]['grad_avg'], tensor([[4., 5., 6.]]))", "_____no_output_____" ], [ "show_doc(Optimizer.clear_state)", "_____no_output_____" ], [ "p = tst_param([1,2,3], [4,5,6])\nopt = Optimizer(p, average_grad)\nopt.state[p] = {'force_train': True}\nopt.step()\ntest_eq(opt.state[p]['grad_avg'], tensor([[4., 5., 6.]]))\n\nopt.clear_state()\ntest_eq(opt.state[p], {'force_train': True})", "_____no_output_____" ] ], [ [ "## Optimizers", "_____no_output_____" ], [ "### SGD with momentum", "_____no_output_____" ] ], [ [ "#export\ndef momentum_step(p, lr, grad_avg, **kwargs):\n \"Step for SGD with momentum with `lr`\"\n p.data.add_(grad_avg, alpha=-lr)", "_____no_output_____" ], [ "#export\n@log_args(to_return=True, 
but_as=Optimizer.__init__)\ndef SGD(params, lr, mom=0., wd=0., decouple_wd=True):\n \"A `Optimizer` for SGD with `lr` and `mom` and `params`\"\n cbs = [weight_decay] if decouple_wd else [l2_reg]\n if mom != 0: cbs.append(average_grad)\n cbs.append(sgd_step if mom==0 else momentum_step)\n return Optimizer(params, cbs, lr=lr, mom=mom, wd=wd)", "_____no_output_____" ] ], [ [ "Optional weight decay of `wd` is applied, as true weight decay (decay the weights directly) if `decouple_wd=True` else as L2 regularization (add the decay to the gradients).", "_____no_output_____" ] ], [ [ "#Vanilla SGD\nparams = tst_params()\nopt = SGD(params, lr=0.1)\nopt.step()\ntest_close([p.item() for p in params], [i*0.99 for i in range(4)])\nopt.step()\n[p.item() for p in params]\ntest_close([p.item() for p in params], [i*0.98 for i in range(4)])", "_____no_output_____" ], [ "#SGD with momentum\nparams = tst_params()\nopt = SGD(params, lr=0.1, mom=0.9)\nassert isinstance(opt, Optimizer)\nopt.step()\ntest_close([p.item() for p in params], [i*0.99 for i in range(4)])\nopt.step()\n[p.item() for p in params]\ntest_close([p.item() for p in params], [i*(1 - 0.1 * (0.1 + 0.1*1.9)) for i in range(4)])\nfor i,p in enumerate(params): test_close(opt.state[p]['grad_avg'].item(), i*0.19)", "_____no_output_____" ] ], [ [ "Test weight decay, notice how we can see that L2 regularization is different from weight decay even for simple SGD with momentum.", "_____no_output_____" ] ], [ [ "params = tst_params()\n#Weight decay\nopt = SGD(params, lr=0.1, mom=0.9, wd=0.1)\nopt.step()\ntest_close([p.item() for p in params], [i*0.98 for i in range(4)])\n#L2 reg\nopt = SGD(params, lr=0.1, mom=0.9, wd=0.1, decouple_wd=False)\nopt.step()\n#TODO: fix cause this formula was wrong\n#test_close([p.item() for p in params], [i*0.97 for i in range(4)])", "_____no_output_____" ] ], [ [ "### RMSProp", "_____no_output_____" ] ], [ [ "#export\ndef rms_prop_step(p, lr, sqr_avg, eps, grad_avg=None, **kwargs):\n \"Step for SGD with momentum with `lr`\"\n denom = sqr_avg.sqrt().add_(eps)\n p.data.addcdiv_((grad_avg if grad_avg is not None else p.grad), denom, value=-lr)\n\nrms_prop_step.defaults = dict(eps=1e-8)", "_____no_output_____" ], [ "#export\n@log_args(to_return=True, but_as=Optimizer.__init__)\ndef RMSProp(params, lr, sqr_mom=0.99, mom=0., wd=0., decouple_wd=True):\n \"A `Optimizer` for RMSProp with `lr`, `sqr_mom`, `mom` and `params`\"\n cbs = [weight_decay] if decouple_wd else [l2_reg]\n cbs += ([average_sqr_grad] if mom==0. else [average_grad, average_sqr_grad])\n cbs.append(rms_prop_step)\n return Optimizer(params, cbs, lr=lr, mom=mom, sqr_mom=sqr_mom, wd=wd)", "_____no_output_____" ] ], [ [ "RMSProp was introduced by Geoffrey Hinton in his [course](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf). What is named `sqr_mom` here is the `alpha` in the course. 
Optional weight decay of `wd` is applied, as true weight decay (decay the weights directly) if `decouple_wd=True` else as L2 regularization (add the decay to the gradients).", "_____no_output_____" ] ], [ [ "#Without momentum\nparams = tst_param([1,2,3], [0.1,0.2,0.3])\nopt = RMSProp(params, lr=0.1)\nopt.step()\ntest_close(params[0], tensor([0.,1.,2.]))\nopt.step()\nstep = - 0.1 * 0.1 / (math.sqrt((0.01*0.99+0.01) * 0.1**2) + 1e-8)\ntest_close(params[0], tensor([step, 1+step, 2+step]))", "_____no_output_____" ], [ "#With momentum\nparams = tst_param([1,2,3], [0.1,0.2,0.3])\nopt = RMSProp(params, lr=0.1, mom=0.9)\nopt.step()\ntest_close(params[0], tensor([0.,1.,2.]))\nopt.step()\nstep = - 0.1 * (0.1 + 0.9*0.1) / (math.sqrt((0.01*0.99+0.01) * 0.1**2) + 1e-8)\ntest_close(params[0], tensor([step, 1+step, 2+step]))", "_____no_output_____" ] ], [ [ "### Adam", "_____no_output_____" ] ], [ [ "#export\ndef step_stat(p, step=0, **kwargs):\n \"Register the number of steps done in `state` for `p`\"\n step += 1\n return {'step' : step}", "_____no_output_____" ], [ "p = tst_param(1,0.1)\nstate = {}\nstate = step_stat(p, **state)\ntest_eq(state['step'], 1)\nfor _ in range(5): state = step_stat(p, **state)\ntest_eq(state['step'], 6)", "_____no_output_____" ], [ "#export\ndef debias(mom, damp, step): return damp * (1 - mom**step) / (1-mom)", "_____no_output_____" ], [ "#export\ndef adam_step(p, lr, mom, step, sqr_mom, grad_avg, sqr_avg, eps, **kwargs):\n \"Step for Adam with `lr` on `p`\"\n debias1 = debias(mom, 1-mom, step)\n debias2 = debias(sqr_mom, 1-sqr_mom, step)\n p.data.addcdiv_(grad_avg, (sqr_avg/debias2).sqrt() + eps, value = -lr / debias1)\n return p\n\nadam_step._defaults = dict(eps=1e-5)", "_____no_output_____" ], [ "#export\n@log_args(to_return=True, but_as=Optimizer.__init__)\ndef Adam(params, lr, mom=0.9, sqr_mom=0.99, eps=1e-5, wd=0.01, decouple_wd=True):\n \"A `Optimizer` for Adam with `lr`, `mom`, `sqr_mom`, `eps` and `params`\"\n cbs = [weight_decay] if decouple_wd else [l2_reg]\n cbs += [partial(average_grad, dampening=True), average_sqr_grad, step_stat, adam_step]\n return Optimizer(params, cbs, lr=lr, mom=mom, sqr_mom=sqr_mom, eps=eps, wd=wd)", "_____no_output_____" ] ], [ [ "Adam was introduced by Diederik P. Kingma and Jimmy Ba in [Adam: A Method for Stochastic Optimization](https://arxiv.org/abs/1412.6980). For consistency across optimizers, we renamed `beta1` and `beta2` in the paper to `mom` and `sqr_mom`. Note that our defaults also differ from the paper (0.99 for `sqr_mom` or `beta2`, 1e-5 for `eps`). Those values seem to be better from our experiments in a wide range of situations.\n\nOptional weight decay of `wd` is applied, as true weight decay (decay the weights directly) if `decouple_wd=True` else as L2 regularization (add the decay to the gradients).\n\n> Note: Don't forget that `eps` is an hyper-parameter you can change. Some models won't train without a very high `eps` like 0.1 (intuitively, the higher `eps` is, the closer we are to normal SGD). The usual default of 1e-8 is often too extreme in the sense we don't manage to get as good results as with SGD. 
", "_____no_output_____" ] ], [ [ "params = tst_param([1,2,3], [0.1,0.2,0.3])\nopt = Adam(params, lr=0.1, wd=0)\nopt.step()\nstep = -0.1 * 0.1 / (math.sqrt(0.1**2) + 1e-8)\ntest_close(params[0], tensor([1+step, 2+step, 3+step]))\nopt.step()\ntest_close(params[0], tensor([1+2*step, 2+2*step, 3+2*step]), eps=1e-3)", "_____no_output_____" ] ], [ [ "### RAdam", "_____no_output_____" ], [ "RAdam (for rectified Adam) was introduced by Zhang et al. in [On the Variance of the Adaptive Learning Rate and Beyond](https://arxiv.org/abs/1907.08610) to slightly modify the Adam optimizer to be more stable at the beginning of training (and thus not require a long warmup). They use an estimate of the variance of the moving average of the squared gradients (the term in the denominator of traditional Adam) and rescale this moving average by this term before performing the update.\n\nThis version also incorporates [SAdam](https://arxiv.org/abs/1908.00700); set `beta` to enable this (definition same as in the paper).", "_____no_output_____" ] ], [ [ "#export\ndef radam_step(p, lr, mom, step, sqr_mom, grad_avg, sqr_avg, eps, beta, **kwargs):\n \"Step for RAdam with `lr` on `p`\"\n debias1 = debias(mom, 1-mom, step)\n debias2 = debias(sqr_mom, 1-sqr_mom, step)\n r_inf = 2/(1-sqr_mom) - 1\n r = r_inf - 2*step*sqr_mom**step/(1-sqr_mom**step)\n if r > 5:\n v = math.sqrt(((r-4) * (r-2) * r_inf)/((r_inf-4)*(r_inf-2)*r))\n denom = (sqr_avg/debias2).sqrt()\n if eps: denom += eps\n if beta: denom = F.softplus(denom, beta)\n p.data.addcdiv_(grad_avg, denom, value = -lr*v / debias1)\n else: p.data.add_(grad_avg, alpha=-lr / debias1)\n return p\n\nradam_step._defaults = dict(eps=1e-5)", "_____no_output_____" ], [ "#export\n@log_args(to_return=True, but_as=Optimizer.__init__)\ndef RAdam(params, lr, mom=0.9, sqr_mom=0.99, eps=1e-5, wd=0., beta=0., decouple_wd=True):\n \"A `Optimizer` for Adam with `lr`, `mom`, `sqr_mom`, `eps` and `params`\"\n cbs = [weight_decay] if decouple_wd else [l2_reg]\n cbs += [partial(average_grad, dampening=True), average_sqr_grad, step_stat, radam_step]\n return Optimizer(params, cbs, lr=lr, mom=mom, sqr_mom=sqr_mom, eps=eps, wd=wd, beta=beta)", "_____no_output_____" ] ], [ [ "This is the effective correction reported to the adam step for 500 iterations in RAdam. 
We can see how it goes from 0 to 1, mimicking the effect of a warm-up.", "_____no_output_____" ] ], [ [ "beta = 0.99\nr_inf = 2/(1-beta) - 1\nrs = np.array([r_inf - 2*s*beta**s/(1-beta**s) for s in range(5,500)])\nv = np.sqrt(((rs-4) * (rs-2) * r_inf)/((r_inf-4)*(r_inf-2)*rs))\nplt.plot(v);", "_____no_output_____" ], [ "params = tst_param([1,2,3], [0.1,0.2,0.3])\nopt = RAdam(params, lr=0.1)\n#The r factor is lower than 5 during the first 5 steps so updates use the average of gradients (all the same)\nr_inf = 2/(1-0.99) - 1\nfor i in range(5): \n r = r_inf - 2*(i+1)*0.99**(i+1)/(1-0.99**(i+1))\n assert r <= 5\n opt.step()\np = tensor([0.95, 1.9, 2.85])\ntest_close(params[0], p)\n\n#The r factor is greater than 5 for the sixth step so we update with RAdam\nr = r_inf - 2*6*0.99**6/(1-0.99**6)\nassert r > 5\nopt.step()\nv = math.sqrt(((r-4) * (r-2) * r_inf)/((r_inf-4)*(r_inf-2)*r))\nstep = -0.1*0.1*v/(math.sqrt(0.1**2) + 1e-8)\ntest_close(params[0], p+step)", "_____no_output_____" ] ], [ [ "### QHAdam", "_____no_output_____" ], [ "QHAdam (for Quasi-Hyperbolic Adam) was introduced by Ma & Yarats in [Quasi-Hyperbolic Momentum and Adam for Deep Learning](https://arxiv.org/pdf/1810.06801.pdf) as a *\"computationally cheap, intuitive to interpret, and simple to implement\"* optimizer. Additional code can be found in their [qhoptim repo](https://github.com/facebookresearch/qhoptim). QHAdam is based on QH-Momentum, which introduces the immediate discount factor `nu`, encapsulating plain SGD (`nu = 0`) and momentum (`nu = 1`). QH-Momentum is defined below, where g_t+1 is the update of the moment. An interpretation of QHM is as a nu-weighted average of the momentum update step and the plain SGD update step.\n\n> θ_t+1 ← θ_t − lr * [(1 − nu) · ∇L_t(θ_t) + nu · g_t+1]\n\nQHAdam takes the concept behind QHM above and applies it to Adam, replacing both of Adam’s moment estimators with quasi-hyperbolic terms. \n\nThe paper's suggested default parameters are `mom = 0.999`, `sqr_mom = 0.999`, `nu_1 = 0.7` and `and nu_2 = 1.0`. When training is not stable, it is possible that setting `nu_2 < 1` can improve stability by imposing a tighter step size bound. Note that QHAdam recovers Adam when `nu_1 = nu_2 = 1.0`. 
QHAdam recovers RMSProp (Hinton et al., 2012) when `nu_1 = 0` and `nu_2 = 1`, and NAdam (Dozat, 2016) when `nu_1 = mom` and `nu_2 = 1`.\n\nOptional weight decay of `wd` is applied, as true weight decay (decay the weights directly) if `decouple_wd=True` else as L2 regularization (add the decay to the gradients).", "_____no_output_____" ] ], [ [ "#export\ndef qhadam_step(p, lr, mom, sqr_mom, sqr_avg, nu_1, nu_2, step, grad_avg, eps, **kwargs):\n debias1 = debias(mom, 1-mom, step)\n debias2 = debias(sqr_mom, 1-sqr_mom, step)\n p.data.addcdiv_(((1-nu_1) * p.grad.data) + (nu_1 * (grad_avg / debias1)),\n (((1 - nu_2) * (p.grad.data)**2) + (nu_2 * (sqr_avg / debias2))).sqrt() + eps,\n value = -lr)\n return p\n\nqhadam_step._defaults = dict(eps=1e-8)", "_____no_output_____" ], [ "#export\n@log_args(to_return=True, but_as=Optimizer.__init__)\ndef QHAdam(params, lr, mom=0.999, sqr_mom=0.999, nu_1=0.7, nu_2 = 1.0, eps=1e-8, wd=0., decouple_wd=True):\n \"An `Optimizer` for Adam with `lr`, `mom`, `sqr_mom`, `nus`, eps` and `params`\"\n cbs = [weight_decay] if decouple_wd else [l2_reg]\n cbs += [partial(average_grad, dampening=True), partial(average_sqr_grad, dampening=True), step_stat, qhadam_step]\n return Optimizer(params, cbs, lr=lr, nu_1=nu_1, nu_2=nu_2 ,\n mom=mom, sqr_mom=sqr_mom, eps=eps, wd=wd)", "_____no_output_____" ], [ "params = tst_param([1,2,3], [0.1,0.2,0.3])\nopt = QHAdam(params, lr=0.1)\nopt.step()\nstep = -0.1 * (((1-0.7) * 0.1) + (0.7 * 0.1)) / (\n math.sqrt(((1-1.0) * 0.1**2) + (1.0 * 0.1**2)) + 1e-8) \ntest_close(params[0], tensor([1+step, 2+step, 3+step]))\nopt.step()\ntest_close(params[0], tensor([1+2*step, 2+2*step, 3+2*step]), eps=1e-3)", "_____no_output_____" ] ], [ [ "### LARS/LARC", "_____no_output_____" ] ], [ [ "#export\ndef larc_layer_lr(p, lr, trust_coeff, wd, eps, clip=True, **kwargs):\n \"Computes the local lr before weight decay is applied\"\n p_norm,g_norm = torch.norm(p.data),torch.norm(p.grad.data)\n local_lr = lr*trust_coeff * (p_norm) / (g_norm + p_norm * wd + eps)\n return {'local_lr': min(lr, local_lr) if clip else local_lr}\n\nlarc_layer_lr.defaults = dict(trust_coeff=0.02, wd=0., eps=1e-8)", "_____no_output_____" ], [ "#export\ndef larc_step(p, local_lr, grad_avg=None, **kwargs):\n \"Step for LARC `local_lr` on `p`\"\n p.data.add_(p.grad.data if grad_avg is None else grad_avg, alpha = -local_lr)", "_____no_output_____" ], [ "#export\n@log_args(to_return=True, but_as=Optimizer.__init__)\ndef Larc(params, lr, mom=0.9, clip=True, trust_coeff=0.02, eps=1e-8, wd=0., decouple_wd=True):\n \"A `Optimizer` for Adam with `lr`, `mom`, `sqr_mom`, `eps` and `params`\"\n cbs = [weight_decay] if decouple_wd else [l2_reg]\n if mom!=0.: cbs.append(average_grad)\n cbs += [partial(larc_layer_lr, clip=clip), larc_step]\n return Optimizer(params, cbs, lr=lr, mom=mom, trust_coeff=trust_coeff, eps=eps, wd=wd)", "_____no_output_____" ] ], [ [ "The LARS optimizer was first introduced in [Large Batch Training of Convolutional Networks](https://arxiv.org/abs/1708.03888) then refined in its LARC variant (original LARS is with `clip=False`). 
A learning rate is computed for each individual layer with a certain `trust_coefficient`, then clipped to be always less than `lr`.\n\nOptional weight decay of `wd` is applied, as true weight decay (decay the weights directly) if `decouple_wd=True` else as L2 regularization (add the decay to the gradients).", "_____no_output_____" ] ], [ [ "params = [tst_param([1,2,3], [0.1,0.2,0.3]), tst_param([1,2,3], [0.01,0.02,0.03])]\nopt = Larc(params, lr=0.1)\nopt.step()\n#First param local lr is 0.02 < lr so it's not clipped\ntest_close(opt.state[params[0]]['local_lr'], 0.02)\n#Second param local lr is 0.2 > lr so it's clipped\ntest_eq(opt.state[params[1]]['local_lr'], 0.1)\ntest_close(params[0], tensor([0.998,1.996,2.994]))\ntest_close(params[1], tensor([0.999,1.998,2.997]))", "_____no_output_____" ], [ "params = [tst_param([1,2,3], [0.1,0.2,0.3]), tst_param([1,2,3], [0.01,0.02,0.03])]\nopt = Larc(params, lr=0.1, clip=False)\nopt.step()\n#No clipping\ntest_close(opt.state[params[0]]['local_lr'], 0.02)\ntest_close(opt.state[params[1]]['local_lr'], 0.2)\ntest_close(params[0], tensor([0.998,1.996,2.994]))\ntest_close(params[1], tensor([0.998,1.996,2.994]))", "_____no_output_____" ] ], [ [ "### LAMB", "_____no_output_____" ] ], [ [ "#export\ndef lamb_step(p, lr, mom, step, sqr_mom, grad_avg, sqr_avg, eps, **kwargs):\n \"Step for LAMB with `lr` on `p`\"\n debias1 = debias(mom, 1-mom, step)\n debias2 = debias(sqr_mom, 1-sqr_mom, step)\n r1 = p.data.pow(2).mean().sqrt()\n step = (grad_avg/debias1) / ((sqr_avg/debias2).sqrt()+eps)\n r2 = step.pow(2).mean().sqrt()\n q = 1 if r1 == 0 or r2 == 0 else min(r1/r2,10)\n p.data.add_(step, alpha = -lr * q)\n\nlamb_step._defaults = dict(eps=1e-6, wd=0.)", "_____no_output_____" ], [ "#export\n@log_args(to_return=True, but_as=Optimizer.__init__)\ndef Lamb(params, lr, mom=0.9, sqr_mom=0.99, eps=1e-5, wd=0., decouple_wd=True):\n \"A `Optimizer` for Adam with `lr`, `mom`, `sqr_mom`, `eps` and `params`\"\n cbs = [weight_decay] if decouple_wd else [l2_reg]\n cbs += [partial(average_grad, dampening=True), average_sqr_grad, step_stat, lamb_step]\n return Optimizer(params, cbs, lr=lr, mom=mom, sqr_mom=sqr_mom, eps=eps, wd=wd)", "_____no_output_____" ] ], [ [ "LAMB was introduced in [Large Batch Optimization for Deep Learning: Training BERT in 76 minutes](https://arxiv.org/abs/1904.00962). Intuitively, it's LARC applied to Adam. As in `Adam`, we renamed `beta1` and `beta2` in the paper to `mom` and `sqr_mom`. Note that our defaults also differ from the paper (0.99 for `sqr_mom` or `beta2`, 1e-5 for `eps`). Those values seem to be better from our experiments in a wide range of situations.\n\nOptional weight decay of `wd` is applied, as true weight decay (decay the weights directly) if `decouple_wd=True` else as L2 regularization (add the decay to the gradients).", "_____no_output_____" ] ], [ [ "params = tst_param([1,2,3], [0.1,0.2,0.3])\nopt = Lamb(params, lr=0.1)\nopt.step()\ntest_close(params[0], tensor([0.7840,1.7840,2.7840]), eps=1e-3)", "_____no_output_____" ] ], [ [ "## Lookahead -", "_____no_output_____" ], [ "Lookahead was introduced by Zhang et al. in [Lookahead Optimizer: k steps forward, 1 step back](https://arxiv.org/abs/1907.08610). It can be run on top of any optimizer and consists in having the final weights of the model be a moving average. 
In practice, we update our model using the internal optimizer but keep a copy of old weights that and every `k` steps, we change the weights by a moving average of the *fast weights* (the ones updated by the inner optimizer) with the *slow weights* (the copy of old weights). Those *slow weights* act like a stability mechanism.", "_____no_output_____" ] ], [ [ "#export\n@log_args(but='opt')\nclass Lookahead(Optimizer, GetAttr):\n \"Wrap `opt` in a lookahead optimizer\"\n _default='opt'\n def __init__(self, opt, k=6, alpha=0.5):\n store_attr('opt,k,alpha')\n self._init_state()\n\n def step(self):\n if self.slow_weights is None: self._copy_weights()\n self.opt.step()\n self.count += 1\n if self.count%self.k != 0: return\n for slow_pg,fast_pg in zip(self.slow_weights,self.param_lists):\n for slow_p,fast_p in zip(slow_pg,fast_pg):\n slow_p.data.add_(fast_p.data-slow_p.data, alpha=self.alpha)\n fast_p.data.copy_(slow_p.data)\n\n def clear_state(self):\n self.opt.clear_state()\n self._init_state()\n\n def state_dict(self):\n state = self.opt.state_dict()\n state.update({'count': self.count, 'slow_weights': self.slow_weights})\n return state\n\n def load_state_dict(self, sd):\n self.count = sd.pop('count')\n self.slow_weights = sd.pop('slow_weights')\n self.opt.load_state_dict(sd)\n\n def _init_state(self): self.count,self.slow_weights = 0,None\n def _copy_weights(self): self.slow_weights = L(L(p.clone().detach() for p in pg) for pg in self.param_lists)\n\n @property\n def param_lists(self): return self.opt.param_lists\n @param_lists.setter\n def param_lists(self, v): self.opt.param_lists = v", "_____no_output_____" ], [ "params = tst_param([1,2,3], [0.1,0.2,0.3])\np,g = params[0].data.clone(),tensor([0.1,0.2,0.3])\nopt = Lookahead(SGD(params, lr=0.1))\nfor k in range(5): opt.step()\n#first 5 steps are normal SGD steps\ntest_close(params[0], p - 0.5*g)\n#Since k=6, sixth step is a moving average of the 6 SGD steps with the initial weight\nopt.step()\ntest_close(params[0], p * 0.5 + (p-0.6*g) * 0.5)", "_____no_output_____" ], [ "#export\n@delegates(RAdam)\ndef ranger(p, lr, mom=0.95, wd=0.01, eps=1e-6, **kwargs):\n \"Convenience method for `Lookahead` with `RAdam`\"\n return Lookahead(RAdam(p, lr=lr, mom=mom, wd=wd, eps=eps, **kwargs))", "_____no_output_____" ] ], [ [ "## OptimWrapper -", "_____no_output_____" ] ], [ [ "#export\ndef detuplify_pg(d):\n res = {}\n for k,v in d.items():\n if k == 'params': continue\n if is_listy(v): res.update(**{f'{k}__{i}': v_ for i,v_ in enumerate(v)})\n else: res[k] = v\n return res", "_____no_output_____" ], [ "tst = {'lr': 1e-2, 'mom': 0.9, 'params':[0,1,2]}\ntest_eq(detuplify_pg(tst), {'lr': 1e-2, 'mom': 0.9})\ntst = {'lr': 1e-2, 'betas': (0.9,0.999), 'params':[0,1,2]}\ntest_eq(detuplify_pg(tst), {'lr': 1e-2, 'betas__0': 0.9, 'betas__1': 0.999})", "_____no_output_____" ], [ "#export\ndef set_item_pg(pg, k, v):\n if '__' not in k: pg[k] = v\n else:\n name,idx = k.split('__')\n pg[name] = tuple(v if i==int(idx) else pg[name][i] for i in range_of(pg[name]))\n return pg", "_____no_output_____" ], [ "tst = {'lr': 1e-2, 'mom': 0.9, 'params':[0,1,2]}\ntest_eq(set_item_pg(tst, 'lr', 1e-3), {'lr': 1e-3, 'mom': 0.9, 'params':[0,1,2]})\ntst = {'lr': 1e-2, 'betas': (0.9,0.999), 'params':[0,1,2]}\ntest_eq(set_item_pg(tst, 'betas__0', 0.95), {'lr': 1e-2, 'betas': (0.95,0.999), 'params':[0,1,2]})", "_____no_output_____" ], [ "#export\npytorch_hp_map = {'momentum': 'mom', 'weight_decay': 'wd', 'alpha': 'sqr_mom', 'betas__0': 'mom', 'betas__1': 'sqr_mom'}", 
"_____no_output_____" ], [ "#export\nclass OptimWrapper(_BaseOptimizer, GetAttr):\n _xtra=['zero_grad', 'step', 'state_dict', 'load_state_dict']\n _default='opt'\n def __init__(self, opt, hp_map=None):\n self.opt = opt\n if hp_map is None: hp_map = pytorch_hp_map\n self.fwd_map = {k: hp_map[k] if k in hp_map else k for k in detuplify_pg(opt.param_groups[0]).keys()}\n self.bwd_map = {v:k for k,v in self.fwd_map.items()}\n self.state = defaultdict(dict, {})\n self.frozen_idx = 0\n\n @property\n def hypers(self):\n return [{self.fwd_map[k]:v for k,v in detuplify_pg(pg).items() if k != 'params'} for pg in self.opt.param_groups]\n\n def _set_hyper(self, k, v):\n for pg,v_ in zip(self.opt.param_groups,v): pg = set_item_pg(pg, self.bwd_map[k], v_)\n\n def clear_state(self): self.opt.state = defaultdict(dict, {})\n\n @property\n def param_lists(self): return [pg['params'] for pg in self.opt.param_groups]\n @param_lists.setter\n def param_lists(self, v):\n for pg,v_ in zip(self.opt.param_groups,v): pg['params'] = v_", "_____no_output_____" ], [ "sgd = SGD([tensor([1,2,3])], lr=1e-3, mom=0.9, wd=1e-2)\ntst_sgd = OptimWrapper(torch.optim.SGD([tensor([1,2,3])], lr=1e-3, momentum=0.9, weight_decay=1e-2))\n#Access to param_groups\ntest_eq(tst_sgd.param_lists, sgd.param_lists)\n#Set param_groups\ntst_sgd.param_lists = [[tensor([4,5,6])]]\ntest_eq(tst_sgd.opt.param_groups[0]['params'], [tensor(4,5,6)])\n#Access to hypers\ntest_eq(tst_sgd.hypers, [{**sgd.hypers[0], 'dampening': 0., 'nesterov': False}])\n#Set hypers\ntst_sgd.set_hyper('mom', 0.95)\ntest_eq(tst_sgd.opt.param_groups[0]['momentum'], 0.95)", "_____no_output_____" ], [ "tst_sgd = OptimWrapper(torch.optim.SGD([{'params': [tensor([1,2,3])], 'lr': 1e-3}, \n {'params': [tensor([4,5,6])], 'lr': 1e-2}], momentum=0.9, weight_decay=1e-2))\nsgd = SGD([[tensor([1,2,3])], [tensor([4,5,6])]], lr=[1e-3, 1e-2], mom=0.9, wd=1e-2)\n#Access to param_groups\ntest_eq(tst_sgd.param_lists, sgd.param_lists)\n#Set param_groups\ntst_sgd.param_lists = [[tensor([4,5,6])], [tensor([1,2,3])]]\ntest_eq(tst_sgd.opt.param_groups[0]['params'], [tensor(4,5,6)])\ntest_eq(tst_sgd.opt.param_groups[1]['params'], [tensor(1,2,3)])\n#Access to hypers\ntest_eq(tst_sgd.hypers, [{**sgd.hypers[i], 'dampening': 0., 'nesterov': False} for i in range(2)])\n#Set hypers\ntst_sgd.set_hyper('mom', 0.95)\ntest_eq([pg['momentum'] for pg in tst_sgd.opt.param_groups], [0.95,0.95])\ntst_sgd.set_hyper('lr', [1e-4,1e-3])\ntest_eq([pg['lr'] for pg in tst_sgd.opt.param_groups], [1e-4,1e-3])", "_____no_output_____" ], [ "#hide\n#check it works with tuply hp names like in Adam\ntst_adam = OptimWrapper(torch.optim.Adam([tensor([1,2,3])], lr=1e-2, betas=(0.9, 0.99)))\ntest_eq(tst_adam.hypers, [{'lr': 0.01, 'mom': 0.9, 'sqr_mom': 0.99, 'eps': 1e-08, 'wd': 0, 'amsgrad': False}])\ntst_adam.set_hyper('mom', 0.95)\ntest_eq(tst_adam.opt.param_groups[0]['betas'], (0.95, 0.99))\ntst_adam.set_hyper('sqr_mom', 0.9)\ntest_eq(tst_adam.opt.param_groups[0]['betas'], (0.95, 0.9))", "_____no_output_____" ], [ "def _mock_train(m, x, y, opt):\n m.train()\n for i in range(0, 100, 25):\n z = m(x[i:i+25])\n loss = F.mse_loss(z, y[i:i+25])\n loss.backward()\n opt.step()\n opt.zero_grad()", "_____no_output_____" ], [ "m = nn.Linear(4,5)\nx = torch.randn(100, 3, 4)\ny = torch.randn(100, 3, 5)\ntry:\n torch.save(m.state_dict(), 'tmp.pth')\n wgt,bias = m.weight.data.clone(),m.bias.data.clone()\n\n m.load_state_dict(torch.load('tmp.pth'))\n opt1 = OptimWrapper(torch.optim.AdamW(m.parameters(), betas=(0.9, 0.99), eps=1e-5, 
weight_decay=1e-2))\n _mock_train(m, x.clone(), y.clone(), opt1)\n wgt1,bias1 = m.weight.data.clone(),m.bias.data.clone()\n\n m.load_state_dict(torch.load('tmp.pth'))\n opt2 = Adam(m.parameters(), 1e-3, wd=1e-2)\n _mock_train(m, x.clone(), y.clone(), opt2)\n wgt2,bias2 = m.weight.data.clone(),m.bias.data.clone()\n \n test_close(wgt1,wgt2,eps=1e-3)\n test_close(bias1,bias2,eps=1e-3)\nfinally: os.remove('tmp.pth')", "_____no_output_____" ], [ "m = nn.Linear(4,5)\nx = torch.randn(100, 3, 4)\ny = torch.randn(100, 3, 5)\ntry:\n torch.save(m.state_dict(), 'tmp.pth')\n wgt,bias = m.weight.data.clone(),m.bias.data.clone()\n\n m.load_state_dict(torch.load('tmp.pth'))\n opt1 = OptimWrapper(torch.optim.Adam(m.parameters(), betas=(0.9, 0.99), eps=1e-5, weight_decay=1e-2))\n _mock_train(m, x.clone(), y.clone(), opt1)\n wgt1,bias1 = m.weight.data.clone(),m.bias.data.clone()\n\n m.load_state_dict(torch.load('tmp.pth'))\n opt2 = Adam(m.parameters(), 1e-3, wd=1e-2, decouple_wd=False)\n _mock_train(m, x.clone(), y.clone(), opt2)\n wgt2,bias2 = m.weight.data.clone(),m.bias.data.clone()\n \n test_close(wgt1,wgt2,eps=1e-3)\n test_close(bias1,bias2,eps=1e-3)\nfinally: os.remove('tmp.pth')", "_____no_output_____" ] ], [ [ "## Export -", "_____no_output_____" ] ], [ [ "#hide\nfrom nbdev.export import *\nnotebook2script()", "Converted 00_torch_core.ipynb.\nConverted 01_layers.ipynb.\nConverted 02_data.load.ipynb.\nConverted 03_data.core.ipynb.\nConverted 04_data.external.ipynb.\nConverted 05_data.transforms.ipynb.\nConverted 06_data.block.ipynb.\nConverted 07_vision.core.ipynb.\nConverted 08_vision.data.ipynb.\nConverted 09_vision.augment.ipynb.\nConverted 09b_vision.utils.ipynb.\nConverted 09c_vision.widgets.ipynb.\nConverted 10_tutorial.pets.ipynb.\nConverted 11_vision.models.xresnet.ipynb.\nConverted 12_optimizer.ipynb.\nConverted 13_callback.core.ipynb.\nConverted 13a_learner.ipynb.\nConverted 13b_metrics.ipynb.\nConverted 14_callback.schedule.ipynb.\nConverted 14a_callback.data.ipynb.\nConverted 15_callback.hook.ipynb.\nConverted 15a_vision.models.unet.ipynb.\nConverted 16_callback.progress.ipynb.\nConverted 17_callback.tracker.ipynb.\nConverted 18_callback.fp16.ipynb.\nConverted 18a_callback.training.ipynb.\nConverted 19_callback.mixup.ipynb.\nConverted 20_interpret.ipynb.\nConverted 20a_distributed.ipynb.\nConverted 21_vision.learner.ipynb.\nConverted 22_tutorial.imagenette.ipynb.\nConverted 23_tutorial.vision.ipynb.\nConverted 24_tutorial.siamese.ipynb.\nConverted 24_vision.gan.ipynb.\nConverted 30_text.core.ipynb.\nConverted 31_text.data.ipynb.\nConverted 32_text.models.awdlstm.ipynb.\nConverted 33_text.models.core.ipynb.\nConverted 34_callback.rnn.ipynb.\nConverted 35_tutorial.wikitext.ipynb.\nConverted 36_text.models.qrnn.ipynb.\nConverted 37_text.learner.ipynb.\nConverted 38_tutorial.text.ipynb.\nConverted 40_tabular.core.ipynb.\nConverted 41_tabular.data.ipynb.\nConverted 42_tabular.model.ipynb.\nConverted 43_tabular.learner.ipynb.\nConverted 44_tutorial.tabular.ipynb.\nConverted 45_collab.ipynb.\nConverted 46_tutorial.collab.ipynb.\nConverted 50_tutorial.datablock.ipynb.\nConverted 60_medical.imaging.ipynb.\nConverted 61_tutorial.medical_imaging.ipynb.\nConverted 65_medical.text.ipynb.\nConverted 70_callback.wandb.ipynb.\nConverted 71_callback.tensorboard.ipynb.\nConverted 72_callback.neptune.ipynb.\nConverted 73_callback.captum.ipynb.\nConverted 74_callback.cutmix.ipynb.\nConverted 97_test_utils.ipynb.\nConverted 99_pytorch_doc.ipynb.\nConverted index.ipynb.\nConverted tutorial.ipynb.\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
c52cde7d1202624eddffaa4bc1ede8886c81de3a
22,283
ipynb
Jupyter Notebook
files/playing with pcolormesh.ipynb
tarankalra/ipython-notebooks
7cdcad63c896985747db6e2a0529f1e4391792e8
[ "Unlicense" ]
32
2015-01-07T01:48:05.000Z
2022-03-02T07:07:42.000Z
files/playing with pcolormesh.ipynb
tarankalra/ipython-notebooks
7cdcad63c896985747db6e2a0529f1e4391792e8
[ "Unlicense" ]
1
2015-04-13T21:00:18.000Z
2015-04-13T21:00:18.000Z
files/playing with pcolormesh.ipynb
tarankalra/ipython-notebooks
7cdcad63c896985747db6e2a0529f1e4391792e8
[ "Unlicense" ]
30
2015-01-28T09:31:29.000Z
2022-03-07T03:08:28.000Z
218.460784
11,869
0.907508
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
c52ce3c614fd95bf71b212f01db03ba736d6491a
2,978
ipynb
Jupyter Notebook
Vehicle_Classification_App.ipynb
naveenjs98/Vehicle_ID
4dca30a4f9a2cd52322f12e3988e8ac167841bc7
[ "Apache-2.0" ]
null
null
null
Vehicle_Classification_App.ipynb
naveenjs98/Vehicle_ID
4dca30a4f9a2cd52322f12e3988e8ac167841bc7
[ "Apache-2.0" ]
null
null
null
Vehicle_Classification_App.ipynb
naveenjs98/Vehicle_ID
4dca30a4f9a2cd52322f12e3988e8ac167841bc7
[ "Apache-2.0" ]
null
null
null
21.897059
120
0.542982
[ [ [ "# The Vehicle Classification App", "_____no_output_____" ], [ "This app will take in a picture of the vehicle and will identify it as a car, motorbike or a bus.", "_____no_output_____" ] ], [ [ "from fastai.vision.all import *\nfrom fastai.vision.widgets import *", "_____no_output_____" ], [ "#import pathlib\n#temp = pathlib.PosixPath\n#pathlib.PosixPath = pathlib.WindowsPath", "_____no_output_____" ], [ "path = Path()\nlearn_inf = load_learner(path/'Vehicle_Classifier.pkl')\nbtn_upload = widgets.FileUpload()\nout_pl = widgets.Output()\nlbl_pred = widgets.Label()", "_____no_output_____" ], [ "def on_data_change(change):\n lbl_pred.value = ''\n img = PILImage.create(btn_upload.data[-1])\n out_pl.clear_output()\n with out_pl: display(img.to_thumb(128,128))\n pred,pred_idx,probs = learn_inf.predict(img)\n lbl_pred.value = f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}'", "_____no_output_____" ], [ "btn_upload.observe(on_data_change, names=['data'])", "_____no_output_____" ], [ "display(VBox([widgets.Label('Upload a picture of the vehicle'), btn_upload,out_pl, lbl_pred]))", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
c52ce798f3bdecedb319bda15fe7e3bda051e648
47,560
ipynb
Jupyter Notebook
module2-random-forests/Trevor_James_LS_DS_222_assignment.ipynb
trevorwjames/DS-Unit-2-Kaggle-Challenge
a0729a5511a9443aea1716fd9f8b1b0015ea5d15
[ "MIT" ]
null
null
null
module2-random-forests/Trevor_James_LS_DS_222_assignment.ipynb
trevorwjames/DS-Unit-2-Kaggle-Challenge
a0729a5511a9443aea1716fd9f8b1b0015ea5d15
[ "MIT" ]
null
null
null
module2-random-forests/Trevor_James_LS_DS_222_assignment.ipynb
trevorwjames/DS-Unit-2-Kaggle-Challenge
a0729a5511a9443aea1716fd9f8b1b0015ea5d15
[ "MIT" ]
null
null
null
36.528418
316
0.446131
[ [ [ "<a href=\"https://colab.research.google.com/github/trevorwjames/DS-Unit-2-Kaggle-Challenge/blob/master/module2-random-forests/Trevor_James_LS_DS_222_assignment.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "Lambda School Data Science\n\n*Unit 2, Sprint 2, Module 2*\n\n---", "_____no_output_____" ], [ "# Random Forests\n\n## Assignment\n- [ ] Read [“Adopting a Hypothesis-Driven Workflow”](http://archive.is/Nu3EI), a blog post by a Lambda DS student about the Tanzania Waterpumps challenge.\n- [ ] Continue to participate in our Kaggle challenge.\n- [ ] Define a function to wrangle train, validate, and test sets in the same way. Clean outliers and engineer features.\n- [ ] Try Ordinal Encoding.\n- [ ] Try a Random Forest Classifier.\n- [ ] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.)\n- [ ] Commit your notebook to your fork of the GitHub repo.\n\n## Stretch Goals\n\n### Doing\n- [ ] Add your own stretch goal(s) !\n- [ ] Do more exploratory data analysis, data cleaning, feature engineering, and feature selection.\n- [ ] Try other [categorical encodings](https://contrib.scikit-learn.org/category_encoders/).\n- [ ] Get and plot your feature importances.\n- [ ] Make visualizations and share on Slack.\n\n### Reading\n\nTop recommendations in _**bold italic:**_\n\n#### Decision Trees\n- A Visual Introduction to Machine Learning, [Part 1: A Decision Tree](http://www.r2d3.us/visual-intro-to-machine-learning-part-1/), and _**[Part 2: Bias and Variance](http://www.r2d3.us/visual-intro-to-machine-learning-part-2/)**_\n- [Decision Trees: Advantages & Disadvantages](https://christophm.github.io/interpretable-ml-book/tree.html#advantages-2)\n- [How a Russian mathematician constructed a decision tree — by hand — to solve a medical problem](http://fastml.com/how-a-russian-mathematician-constructed-a-decision-tree-by-hand-to-solve-a-medical-problem/)\n- [How decision trees work](https://brohrer.github.io/how_decision_trees_work.html)\n- [Let’s Write a Decision Tree Classifier from Scratch](https://www.youtube.com/watch?v=LDRbO9a6XPU)\n\n#### Random Forests\n- [_An Introduction to Statistical Learning_](http://www-bcf.usc.edu/~gareth/ISL/), Chapter 8: Tree-Based Methods\n- [Coloring with Random Forests](http://structuringtheunstructured.blogspot.com/2017/11/coloring-with-random-forests.html)\n- _**[Random Forests for Complete Beginners: The definitive guide to Random Forests and Decision Trees](https://victorzhou.com/blog/intro-to-random-forests/)**_\n\n#### Categorical encoding for trees\n- [Are categorical variables getting lost in your random forests?](https://roamanalytics.com/2016/10/28/are-categorical-variables-getting-lost-in-your-random-forests/)\n- [Beyond One-Hot: An Exploration of Categorical Variables](http://www.willmcginnis.com/2015/11/29/beyond-one-hot-an-exploration-of-categorical-variables/)\n- _**[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)**_\n- _**[Coursera — How to Win a Data Science Competition: Learn from Top Kagglers — Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)**_\n- [Mean (likelihood) encodings: a comprehensive 
study](https://www.kaggle.com/vprokopev/mean-likelihood-encodings-a-comprehensive-study)\n- [The Mechanics of Machine Learning, Chapter 6: Categorically Speaking](https://mlbook.explained.ai/catvars.html)\n\n#### Imposter Syndrome\n- [Effort Shock and Reward Shock (How The Karate Kid Ruined The Modern World)](http://www.tempobook.com/2014/07/09/effort-shock-and-reward-shock/)\n- [How to manage impostor syndrome in data science](https://towardsdatascience.com/how-to-manage-impostor-syndrome-in-data-science-ad814809f068)\n- [\"I am not a real data scientist\"](https://brohrer.github.io/imposter_syndrome.html)\n- _**[Imposter Syndrome in Data Science](https://caitlinhudon.com/2018/01/19/imposter-syndrome-in-data-science/)**_\n\n\n### More Categorical Encodings\n\n**1.** The article **[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)** mentions 4 encodings:\n\n- **\"Categorical Encoding\":** This means using the raw categorical values as-is, not encoded. Scikit-learn doesn't support this, but some tree algorithm implementations do. For example, [Catboost](https://catboost.ai/), or R's [rpart](https://cran.r-project.org/web/packages/rpart/index.html) package.\n- **Numeric Encoding:** Synonymous with Label Encoding, or \"Ordinal\" Encoding with random order. We can use [category_encoders.OrdinalEncoder](https://contrib.scikit-learn.org/category_encoders/ordinal.html).\n- **One-Hot Encoding:** We can use [category_encoders.OneHotEncoder](https://contrib.scikit-learn.org/category_encoders/onehot.html).\n- **Binary Encoding:** We can use [category_encoders.BinaryEncoder](https://contrib.scikit-learn.org/category_encoders/binary.html).\n\n\n**2.** The short video \n**[Coursera — How to Win a Data Science Competition: Learn from Top Kagglers — Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)** introduces an interesting idea: use both X _and_ y to encode categoricals.\n\nCategory Encoders has multiple implementations of this general concept:\n\n- [CatBoost Encoder](https://contrib.scikit-learn.org/category_encoders/catboost.html)\n- [Generalized Linear Mixed Model Encoder](https://contrib.scikit-learn.org/category_encoders/glmm.html)\n- [James-Stein Encoder](https://contrib.scikit-learn.org/category_encoders/jamesstein.html)\n- [Leave One Out](https://contrib.scikit-learn.org/category_encoders/leaveoneout.html)\n- [M-estimate](https://contrib.scikit-learn.org/category_encoders/mestimate.html)\n- [Target Encoder](https://contrib.scikit-learn.org/category_encoders/targetencoder.html)\n- [Weight of Evidence](https://contrib.scikit-learn.org/category_encoders/woe.html)\n\nCategory Encoder's mean encoding implementations work for regression problems or binary classification problems. \n\nFor multi-class classification problems, you will need to temporarily reformulate it as binary classification. For example:\n\n```python\nencoder = ce.TargetEncoder(min_samples_leaf=..., smoothing=...) 
# Both parameters > 1 to avoid overfitting\nX_train_encoded = encoder.fit_transform(X_train, y_train=='functional')\nX_val_encoded = encoder.transform(X_train, y_val=='functional')\n```\n\nFor this reason, mean encoding won't work well within pipelines for multi-class classification problems.\n\n**3.** The **[dirty_cat](https://dirty-cat.github.io/stable/)** library has a Target Encoder implementation that works with multi-class classification.\n\n```python\n dirty_cat.TargetEncoder(clf_type='multiclass-clf')\n```\nIt also implements an interesting idea called [\"Similarity Encoder\" for dirty categories](https://www.slideshare.net/GaelVaroquaux/machine-learning-on-non-curated-data-154905090).\n\nHowever, it seems like dirty_cat doesn't handle missing values or unknown categories as well as category_encoders does. And you may need to use it with one column at a time, instead of with your whole dataframe.\n\n**4. [Embeddings](https://www.kaggle.com/colinmorris/embedding-layers)** can work well with sparse / high cardinality categoricals.\n\n_**I hope it’s not too frustrating or confusing that there’s not one “canonical” way to encode categoricals. It’s an active area of research and experimentation — maybe you can make your own contributions!**_", "_____no_output_____" ], [ "### Setup\n\nYou can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab (run the code cell below).", "_____no_output_____" ] ], [ [ "%%capture\nimport sys\n\n# If you're on Colab:\nif 'google.colab' in sys.modules:\n DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'\n !pip install category_encoders==2.*\n\n# If you're working locally:\nelse:\n DATA_PATH = '../data/'", "_____no_output_____" ], [ "import pandas as pd\nfrom sklearn.model_selection import train_test_split\n\ntrain = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'), \n pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))\ntest = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')\nsample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')\n\n# adding in a val during the data set loading\ntrain, val = train_test_split(train, train_size=.80, test_size=.20,\n stratify=train['status_group'], random_state=42)\n\ntrain.shape, val.shape, test.shape", "_____no_output_____" ] ], [ [ "# Data Wrangle Function", "_____no_output_____" ] ], [ [ "import numpy as np\n", "_____no_output_____" ], [ "def wrangle(X):\n\n # Working with a copy\n X = X.copy()\n\n # dealing with very small near 0 values \n X['latitude'] = X['latitude'].replace(-2e-08, 0)\n\n # replacing the missing values that are actually 0 with NaNs\n cols_with_zeros = ['longitude', 'latitude']\n for col in cols_with_zeros:\n X[col] = X[col].replace(0, np.nan)\n\n # dropping repetive columns\n X = X.drop(columns=['quantity_group', 'id', 'waterpoint_type_group', 'payment_type', \n 'extraction_type_group'])\n\n # convert date_recorded feature to date_time\n X['date_recorded'] = pd.to_datetime(X['date_recorded'])\n\n return X", "_____no_output_____" ], [ "train = wrangle(train)\nval = wrangle(val)\ntest = wrangle(test)", "_____no_output_____" ], [ "train.shape", "_____no_output_____" ], [ "X_train.shape", "_____no_output_____" ] ], [ [ "# Feature Engineering", "_____no_output_____" ] ], [ [ "# data cleaning of high cardinality features\ntarget = 'status_group'\n\ntrain_features = train.drop(columns=target)\n\nnumeric_features = 
train_features.select_dtypes(include='number').columns.tolist()\n\ncardinality = train_features.select_dtypes(exclude='number').nunique()\n\ncategorical_features = cardinality[cardinality <=50].index.tolist()\n\nfeatures = numeric_features + categorical_features", "_____no_output_____" ], [ "train.head(5)", "_____no_output_____" ], [ "X_train['waterpoint_type'].unique()", "_____no_output_____" ], [ "X_train['waterpoint_type_group'].unique()", "_____no_output_____" ], [ "X_train['payment'].unique()", "_____no_output_____" ], [ "X_train['payment_type'].unique()", "_____no_output_____" ], [ "X_train['source_class'].unique()", "_____no_output_____" ] ], [ [ "# Feature/Target Separation", "_____no_output_____" ] ], [ [ "X_train = train[features]\ny_train = train[target]\nX_val = val[features]\ny_val = val[target]\nX_test = test[features]", "_____no_output_____" ] ], [ [ "# Model Building", "_____no_output_____" ] ], [ [ "import category_encoders as ce\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.experimental import enable_iterative_imputer\nfrom sklearn.impute import IterativeImputer\nfrom sklearn.impute import KNNImputer\nfrom sklearn.ensemble import RandomForestClassifier", "_____no_output_____" ] ], [ [ "#### Trying a simple model using Random Forrest\n - Including: OHE, Simple Imputer", "_____no_output_____" ] ], [ [ "# Starding with a simple model using RF\nmodel = Pipeline([\n ('ohe', ce.OneHotEncoder(use_cat_names=True, cols=categorical_features)),\n ('impute', SimpleImputer()),\n ('classifier', RandomForestClassifier(random_state=42))\n])\n\n#fitting model\nmodel.fit(X_train, y_train)\n\n#train data score\nprint('Training Data Accuracy:', model.score(X_train, y_train))\nprint('Val data accuracy:', model.score(X_val, y_val))", "Training Data Accuracy: 0.9954545454545455\nVal data accuracy: 0.8027777777777778\n" ] ], [ [ "We see an inrease in Val data accuracy, although from the training data we see a possibility of an overfit model. 
\n", "_____no_output_____" ], [ "#### Stay within random forrest\n - using a Ordinal Encoder instead of OHE", "_____no_output_____" ] ], [ [ "model = Pipeline([\n ('OE', ce.OrdinalEncoder(cols=categorical_features)),\n ('impute', SimpleImputer()),\n ('classifier', RandomForestClassifier(random_state=42))\n])\n\n#fitting model\nmodel.fit(X_train, y_train)\n\n#train data score\nprint('Training Data Accuracy:', model.score(X_train, y_train))\nprint('Val data accuracy:', model.score(X_val, y_val))", "Training Data Accuracy: 0.9954545454545455\nVal data accuracy: 0.8065656565656566\n" ], [ "model = Pipeline([\n ('OE', ce.OrdinalEncoder(cols=categorical_features)),\n ('impute', KNNImputer()),\n ('classifier', RandomForestClassifier(random_state=42))\n])\nmodel", "_____no_output_____" ], [ "# max_depth = 17\n\nmodel = Pipeline([\n ('OE', ce.OrdinalEncoder(cols=categorical_features)),\n ('impute', SimpleImputer()),\n ('classifier', RandomForestClassifier(bootstrap=True, ccp_alpha=0.0,\n class_weight=None, criterion='gini',\n max_depth=18, max_features='auto',\n max_leaf_nodes=None, max_samples=None,\n min_impurity_decrease=0.0,\n min_impurity_split=None,\n min_samples_leaf=1, min_samples_split=2,\n min_weight_fraction_leaf=0.0,\n n_estimators=100, n_jobs=None,\n oob_score=False, random_state=42,\n verbose=0, warm_start=False))\n])\n\n#fitting model\nmodel.fit(X_train, y_train)\n\n#train data score\nprint('Training Data Accuracy:', model.score(X_train, y_train))\nprint('Val data accuracy:', model.score(X_val, y_val))", "Training Data Accuracy: 0.9287247474747474\nVal data accuracy: 0.8067340067340067\n" ] ], [ [ "Max_depth = 17 created a better generalizable model. Alhough did not yeild as high of a predicition on the leaderboard", "_____no_output_____" ] ], [ [ "model = Pipeline([\n ('OE', ce.OrdinalEncoder(cols=categorical_features)),\n ('impute', SimpleImputer()),\n ('classifier', RandomForestClassifier(bootstrap=True, ccp_alpha=0.0,\n class_weight=None, criterion='gini',\n max_depth=18, max_features='auto',\n max_leaf_nodes=None, max_samples=None,\n min_impurity_decrease=0.0,\n min_impurity_split=None,\n min_samples_leaf=1, min_samples_split=2,\n min_weight_fraction_leaf=0.0,\n n_estimators=100, n_jobs=None,\n oob_score=False, random_state=42,\n verbose=0, warm_start=False))\n])\n\n#fitting model\nmodel.fit(X_train, y_train)\n\n#train/val data score\nprint('Training Data Accuracy:', model.score(X_train, y_train))\nprint('Val data accuracy:', model.score(X_val, y_val))", "Training Data Accuracy: 0.9287247474747474\nVal data accuracy: 0.8067340067340067\n" ], [ "import matplotlib.pyplot as plt\n\n# this doesnt want to plot for some reason. All the conditions work. 
\nencoder = model.named_steps['OE']\nencoded_columns = encoder.transform(X_val).columns\nimportances = pd.Series(model['classifier'].feature_importances_, encoded_columns)\nplt.figure(figsize=(10,40))\nimportances.sort_values().plot.barh()", "_____no_output_____" ] ], [ [ "# Submission to Kaggle CSV", "_____no_output_____" ] ], [ [ "X_train2 = pd.concat([X_train, X_val])\ny_train2 = pd.concat([y_train, y_val])", "_____no_output_____" ], [ "model.fit(X_train2, y_train2)\n\nprint('Training Data Accuracy:', model.score(X_train, y_train))\nprint('Val data accuracy:', model.score(X_val, y_val))", "Training Data Accuracy: 0.9092171717171718\nVal data accuracy: 0.9106902356902357\n" ], [ "y_pred = model.predict(X_test)\n\nsubmission = sample_submission.copy()\nsubmission['status_group'] = y_pred\nsubmission.to_csv('trevor-james-submission5.csv', index=False)", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
c52ced144d55b4005c3f5493eba7924527ce09ef
3,960
ipynb
Jupyter Notebook
0.17/_downloads/05740a216a818e61b7fb37adeda46340/ftclient_rt_average.ipynb
drammock/mne-tools.github.io
5d3a104d174255644d8d5335f58036e32695e85d
[ "BSD-3-Clause" ]
null
null
null
0.17/_downloads/05740a216a818e61b7fb37adeda46340/ftclient_rt_average.ipynb
drammock/mne-tools.github.io
5d3a104d174255644d8d5335f58036e32695e85d
[ "BSD-3-Clause" ]
null
null
null
0.17/_downloads/05740a216a818e61b7fb37adeda46340/ftclient_rt_average.ipynb
drammock/mne-tools.github.io
5d3a104d174255644d8d5335f58036e32695e85d
[ "BSD-3-Clause" ]
null
null
null
73.333333
2,048
0.620707
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "\n# Compute real-time evoked responses with FieldTrip client\n\n\nThis example demonstrates how to connect the MNE real-time\nsystem to the Fieldtrip buffer using FieldTripClient class.\n\nThis example was tested in simulation mode\n\nneuromag2ft --file MNE-sample-data/MEG/sample/sample_audvis_raw.fif\n\nusing a modified version of neuromag2ft available at\n\nhttps://staff.washington.edu/larsoner/minimal_cmds.tar.gz\n\nto run the FieldTrip buffer. Then running this example acquires the\ndata on the client side.\n\nSince the Fieldtrip buffer does not contain all the\nmeasurement information required by the MNE real-time processing\npipeline, an info dictionary must be provided to instantiate FieldTripClient.\nAlternatively, the MNE-Python script will try to guess the missing\nmeasurement info from the Fieldtrip Header object.\n\nTogether with RtEpochs, this can be used to compute evoked\nresponses using moving averages.\n\n", "_____no_output_____" ] ], [ [ "# Author: Mainak Jas <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport matplotlib.pyplot as plt\n\nimport mne\nfrom mne.viz import plot_events\nfrom mne.realtime import FieldTripClient, RtEpochs\n\nprint(__doc__)\n\n# select the left-auditory condition\nevent_id, tmin, tmax = 1, -0.2, 0.5\n\n# user must provide list of bad channels because\n# FieldTrip header object does not provide that\nbads = ['MEG 2443', 'EEG 053']\n\nplt.ion() # make plot interactive\n_, ax = plt.subplots(2, 1, figsize=(8, 8)) # create subplots\n\nwith FieldTripClient(host='localhost', port=1972,\n tmax=150, wait_max=10) as rt_client:\n\n # get measurement info guessed by MNE-Python\n raw_info = rt_client.get_measurement_info()\n\n # select gradiometers\n picks = mne.pick_types(raw_info, meg='grad', eeg=False, eog=True,\n stim=True, exclude=bads)\n\n # create the real-time epochs object\n rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax,\n stim_channel='STI 014', picks=picks,\n reject=dict(grad=4000e-13, eog=150e-6),\n decim=1, isi_max=10.0, proj=None)\n\n # start the acquisition\n rt_epochs.start()\n\n for ii, ev in enumerate(rt_epochs.iter_evoked()):\n print(\"Just got epoch %d\" % (ii + 1))\n\n ev.pick_types(meg=True, eog=False)\n if ii == 0:\n evoked = ev\n else:\n evoked = mne.combine_evoked([evoked, ev], weights='nave')\n\n ax[0].cla()\n ax[1].cla() # clear axis\n\n plot_events(rt_epochs.events[-5:], sfreq=ev.info['sfreq'],\n first_samp=-rt_client.tmin_samp, axes=ax[0])\n\n # plot on second subplot\n evoked.plot(axes=ax[1], selectable=False, time_unit='s')\n ax[1].set_title('Evoked response for gradiometer channels'\n '(event_id = %d)' % event_id)\n\n plt.pause(0.05)\n plt.draw()\n\n rt_epochs.stop()\n plt.close()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ] ]
c52cedcac7a5553e514bdcc3e0750f3e5dae1e36
102,426
ipynb
Jupyter Notebook
COLLAB-GraphRNN.ipynb
jonathangomesselman/graph-generation
72a8be30d54a414fcca9ea0fad1a62e38b85ee2f
[ "MIT" ]
1
2021-12-11T16:03:06.000Z
2021-12-11T16:03:06.000Z
COLLAB-GraphRNN.ipynb
jonathangomesselman/graph-generation
72a8be30d54a414fcca9ea0fad1a62e38b85ee2f
[ "MIT" ]
null
null
null
COLLAB-GraphRNN.ipynb
jonathangomesselman/graph-generation
72a8be30d54a414fcca9ea0fad1a62e38b85ee2f
[ "MIT" ]
1
2021-12-11T16:03:09.000Z
2021-12-11T16:03:09.000Z
669.45098
38,206
0.941812
[ [ [ "# Fun some setup for the project\n# Silence annoying pytorch deprecated warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport matplotlib.pyplot as plt\nfrom eval_nll import *\nimport numpy as np\n%matplotlib inline\n\n# for auto-reloading extenrnal modules\n# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython\n%load_ext autoreload\n%autoreload 2", "_____no_output_____" ] ], [ [ "# COLLAB Multi Dataset Notebook. Treat COLLAB class 1 as the normal dataset and COLLAB classes 2/3 as the anomalous dataset ", "_____no_output_____" ] ], [ [ "nll_dir = 'nll/COLLAB_1'", "_____no_output_____" ], [ "# Step 1\n# Load the training and test nll predictions to test generalization\ntrain_nlls = load_avg_nlls(nll_dir, 'train')\ntest_nlls = load_avg_nlls(nll_dir, 'test')\n\n# Plot distribtutions\nfig, ax_compare = compare_dist([train_nlls, test_nlls], ['COLLAB_1 Train', 'COLLAB_1 Test'], 'Train vs. Test Distributions')", "_____no_output_____" ], [ "# Step 2 \n# Compare the test graphs from the normal class nlls with the anomalous graph nlls\nanom_nlls = load_avg_nlls(nll_dir, 'COLLAB_2')\n\n# Plot distribtutions\nfig, ax_compare = compare_dist([train_nlls, test_nlls, anom_nlls], ['COLLAB_1 Train (norm)', 'COLLAB_1 Test (norm)', 'COLLAB_2 (anom)'], 'Nomral Vs. Anomalous Class distributions')", "_____no_output_____" ], [ "# Step 2 \n# Compare the test graphs from the normal class nlls with the anomalous graph nlls\nanom_nlls = load_avg_nlls(nll_dir, 'COLLAB_3')\n\n# Plot distribtutions\nfig, ax_compare = compare_dist([train_nlls, test_nlls, anom_nlls], ['COLLAB_1 Train (norm)', 'COLLAB_1 Test (norm)', 'COLLAB_3 (anom)'], 'Nomral Vs. Anomalous Class distributions')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
c52cee45182de8dfc373cb815fa8d66a54e66d06
22,395
ipynb
Jupyter Notebook
Lab3/linear_svm.ipynb
linyuehzzz/5526_neural_networks
5e2e5664c4c84d33d05f1b97e735559a29d937cc
[ "MIT" ]
null
null
null
Lab3/linear_svm.ipynb
linyuehzzz/5526_neural_networks
5e2e5664c4c84d33d05f1b97e735559a29d937cc
[ "MIT" ]
null
null
null
Lab3/linear_svm.ipynb
linyuehzzz/5526_neural_networks
5e2e5664c4c84d33d05f1b97e735559a29d937cc
[ "MIT" ]
null
null
null
65.867647
11,706
0.715785
[ [ [ "<a href=\"https://colab.research.google.com/github/linyuehzzz/5526_neural_networks/blob/master/linear_svm.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "##**Lab 3**\nThis code trains an linear SVM to determine if a genomic sequence is an ncRNA.. \nYue Lin (lin.3326 at osu.edu) \nCreated: 11/2/2020", "_____no_output_____" ] ], [ [ "from google.colab import drive\ndrive.mount('/content/gdrive')", "Mounted at /content/gdrive\n" ] ], [ [ "#### **Set up libraries**", "_____no_output_____" ] ], [ [ "!pip install libsvm", "Collecting libsvm\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/4b/11/c7700d0cd3a21eef2d7d996256277fc640ccd4f84717c10228cb6c1567dc/libsvm-3.23.0.4.tar.gz (170kB)\n\r\u001b[K |██ | 10kB 15.4MB/s eta 0:00:01\r\u001b[K |███▉ | 20kB 1.8MB/s eta 0:00:01\r\u001b[K |█████▊ | 30kB 2.1MB/s eta 0:00:01\r\u001b[K |███████▊ | 40kB 2.4MB/s eta 0:00:01\r\u001b[K |█████████▋ | 51kB 2.0MB/s eta 0:00:01\r\u001b[K |███████████▌ | 61kB 2.3MB/s eta 0:00:01\r\u001b[K |█████████████▌ | 71kB 2.5MB/s eta 0:00:01\r\u001b[K |███████████████▍ | 81kB 2.7MB/s eta 0:00:01\r\u001b[K |█████████████████▎ | 92kB 2.9MB/s eta 0:00:01\r\u001b[K |███████████████████▏ | 102kB 2.9MB/s eta 0:00:01\r\u001b[K |█████████████████████▏ | 112kB 2.9MB/s eta 0:00:01\r\u001b[K |███████████████████████ | 122kB 2.9MB/s eta 0:00:01\r\u001b[K |█████████████████████████ | 133kB 2.9MB/s eta 0:00:01\r\u001b[K |███████████████████████████ | 143kB 2.9MB/s eta 0:00:01\r\u001b[K |████████████████████████████▉ | 153kB 2.9MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▊ | 163kB 2.9MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 174kB 2.9MB/s \n\u001b[?25hBuilding wheels for collected packages: libsvm\n Building wheel for libsvm (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n Created wheel for libsvm: filename=libsvm-3.23.0.4-cp36-cp36m-linux_x86_64.whl size=233323 sha256=d3cf94793c0a52b1276ba83e643ba26030c97fbe42440f6b2f5fcc9169f3888c\n Stored in directory: /root/.cache/pip/wheels/5e/9e/b5/dbb033107407eec2f52b8cd24cf024a4b9ec8b62ea5aee995a\nSuccessfully built libsvm\nInstalling collected packages: libsvm\nSuccessfully installed libsvm-3.23.0.4\n" ], [ "from libsvm.svmutil import *\nfrom sklearn.datasets import load_svmlight_file\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "#### **Read training and test data**", "_____no_output_____" ] ], [ [ "def get_data(filename):\n data = load_svmlight_file(filename)\n return data", "_____no_output_____" ] ], [ [ "#### **Classification using linear SVMs**", "_____no_output_____" ] ], [ [ "def linear_svm(train_data, c, test_data):\n # Get data\n train_x = train_data[0]\n train_y = train_data[1]\n test_x = test_data[0]\n test_y = test_data[1]\n\n # Train svm\n prob = svm_problem(train_y, train_x)\n param_str = '-t 0 -c 2e' + str(c)\n print(\"Param: \" + param_str)\n param = svm_parameter(param_str)\n m = svm_train(prob, param)\n\n # Test\n p_label, p_acc, p_val = svm_predict(test_y, test_x, m)\n return p_acc", "_____no_output_____" ] ], [ [ "#### **Wrapper**", "_____no_output_____" ], [ "Read training and test data", "_____no_output_____" ] ], [ [ "%cd \"/content/gdrive/My Drive/Colab Notebooks/cse5526\"\n\n# Read training and test data\ntrain_file = \"ncRNA_s.train.txt\"\ntest_file = \"ncRNA_s.test.txt\"\n\ntrain_data = get_data(train_file)\ntest_data = get_data(train_file)", "/content/gdrive/My Drive/Colab Notebooks/cse5526\n" ] ], [ [ "Classification using linear SVMs", "_____no_output_____" ] ], [ [ "acc = []\nfor c in range(-4, 9):\n p_acc = linear_svm(train_data, c, test_data)\n acc.append(p_acc[0])", "Param: -t 0 -c 2e-4\nAccuracy = 67.8% (1356/2000) (classification)\nParam: -t 0 -c 2e-3\nAccuracy = 67.8% (1356/2000) (classification)\nParam: -t 0 -c 2e-2\nAccuracy = 67.8% (1356/2000) (classification)\nParam: -t 0 -c 2e-1\nAccuracy = 67.8% (1356/2000) (classification)\nParam: -t 0 -c 2e0\nAccuracy = 95.3% (1906/2000) (classification)\nParam: -t 0 -c 2e1\nAccuracy = 95.15% (1903/2000) (classification)\nParam: -t 0 -c 2e2\nAccuracy = 95.2% (1904/2000) (classification)\nParam: -t 0 -c 2e3\nAccuracy = 95.25% (1905/2000) (classification)\nParam: -t 0 -c 2e4\nAccuracy = 95.25% (1905/2000) (classification)\nParam: -t 0 -c 2e5\nAccuracy = 95.4% (1908/2000) (classification)\nParam: -t 0 -c 2e6\nAccuracy = 90.5% (1810/2000) (classification)\nParam: -t 0 -c 2e7\nAccuracy = 90.95% (1819/2000) (classification)\nParam: -t 0 -c 2e8\nAccuracy = 93.55% (1871/2000) (classification)\n" ] ], [ [ "Plot prediction accuracy using linear SVMs", "_____no_output_____" ] ], [ [ "cost = ['2e-4', '2e-3', '2e-2', '2e-1', '2e0', '2e1', '2e2', '2e3', '2e4', '2e5', '2e6', '2e7', '2e8']\nplt.plot(cost, acc)\nplt.xlabel('Cost')\nplt.ylabel('Accuracy')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
c52ceee3f12c694c16356d6ff0f54f19d8b76be1
902,682
ipynb
Jupyter Notebook
data-visualization-iris-species.ipynb
saimanvitha02/data-visualization-iris
9d41730e5ad0697fba32028f24bc351efc62d6c9
[ "MIT" ]
1
2021-08-05T07:54:52.000Z
2021-08-05T07:54:52.000Z
data-visualization-iris-species.ipynb
saimanvitha02/data-visualization-iris
9d41730e5ad0697fba32028f24bc351efc62d6c9
[ "MIT" ]
null
null
null
data-visualization-iris-species.ipynb
saimanvitha02/data-visualization-iris
9d41730e5ad0697fba32028f24bc351efc62d6c9
[ "MIT" ]
null
null
null
1,499.471761
273,632
0.956496
[ [ [ "import pandas as pd\nimport warnings \nwarnings.filterwarnings(\"ignore\")\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nsns.set(style=\"white\", color_codes=True)\n\niris = pd.read_csv(\"iris.csv\") # the iris dataset is now a Pandas DataFrame\n\niris.head()\n", "_____no_output_____" ], [ "iris[\"Species\"].value_counts()", "_____no_output_____" ], [ "# .plot extension from pandas framework is used to make scatterplots\niris.plot(kind=\"scatter\", x=\"SepalLengthCm\", y=\"SepalWidthCm\")", "*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points.\n" ], [ "#A seaborn jointplot shows bivariate scatterplots and univariate histograms\nsns.jointplot(x=\"SepalLengthCm\", y=\"SepalWidthCm\", data=iris, size=5)", "_____no_output_____" ], [ "#Seaborn's FacetGrid is used to color the scatterplot by species\nsns.FacetGrid(iris, hue=\"Species\", size=5) \\\n .map(plt.scatter, \"SepalLengthCm\", \"SepalWidthCm\") \\\n .add_legend()", "_____no_output_____" ], [ "#Boxplots are used to examine the individual feature in seaborn.\nsns.boxplot(x=\"Species\", y=\"PetalLengthCm\", data=iris)", "_____no_output_____" ], [ "#One way to extend this plot is by adding a layer of individual points on top of it through Seaborn's striplot. \n#We'll use jitter=True so that all the points don't fall in single vertical lines above the species\nax = sns.boxplot(x=\"Species\", y=\"PetalLengthCm\", data=iris)\nax = sns.stripplot(x=\"Species\", y=\"PetalLengthCm\", data=iris, jitter=True, edgecolor=\"gray\")", "_____no_output_____" ], [ "#A violin plot combines the benefits of the previous two plots and simplifies them denser regions of the data are fatter, and sparser thiner in a violin plot\nsns.violinplot(x=\"Species\", y=\"PetalLengthCm\", data=iris, size=6)", "_____no_output_____" ], [ "#A final seaborn plot useful for looking at univariate relations is the kdeplot, which creates and visualizes a kernel density estimate of the underlying feature \nsns.FacetGrid(iris, hue=\"Species\", size=6) \\\n .map(sns.kdeplot, \"PetalLengthCm\") \\\n .add_legend()", "_____no_output_____" ], [ "#Another useful seaborn plot is the pairplot, which shows the bivariate relation between each pair of features. \n#From the pairplot, we'll see that the Iris-setosa species is separataed from the other two across all feature combinations\n\nsns.pairplot(iris.drop(\"Id\", axis=1), hue=\"Species\", size=3)", "_____no_output_____" ], [ "#The diagonal elements in a pairplot show the histogram by default. \n#We can update these elements to show other things, such as a kde.\nsns.pairplot(iris.drop(\"Id\", axis=1), hue=\"Species\", size=3, diag_kind=\"kde\")", "_____no_output_____" ], [ "#Boxplot with Pandas\niris.drop(\"Id\", axis=1).boxplot(by=\"Species\", figsize=(12, 6))", "_____no_output_____" ], [ "#One cool more sophisticated technique pandas has available is called Andrews Curves. \n#Andrews Curves involve using attributes of samples as coefficients for Fourier series and then plotting these\nfrom pandas.plotting import andrews_curves\nandrews_curves(iris.drop(\"Id\", axis=1), \"Species\")", "_____no_output_____" ], [ "#Another multivariate visualization technique pandas has is parallel_coordinates. 
\n#Parallel coordinates plots each feature on a separate column & then draws lines connecting the features for each data sample\nfrom pandas.plotting import parallel_coordinates\nparallel_coordinates(iris.drop(\"Id\", axis=1), \"Species\")", "_____no_output_____" ], [ "#A final multivariate visualization technique pandas has is radviz which puts each feature as a point on a 2D plane, \n#and then simulates having each sample attached to those points through a spring weighted by the relative value for that feature\nfrom pandas.plotting import radviz\nradviz(iris.drop(\"Id\", axis=1), \"Species\")", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c52cf24551e7274b702493f009b9a34319897b04
20,803
ipynb
Jupyter Notebook
notebooks/train-model.ipynb
jreig/kaggle-histopathologic-cancer-detection
071810ffb6b70a8cf711cf7050f95426bc152a81
[ "MIT" ]
null
null
null
notebooks/train-model.ipynb
jreig/kaggle-histopathologic-cancer-detection
071810ffb6b70a8cf711cf7050f95426bc152a81
[ "MIT" ]
null
null
null
notebooks/train-model.ipynb
jreig/kaggle-histopathologic-cancer-detection
071810ffb6b70a8cf711cf7050f95426bc152a81
[ "MIT" ]
null
null
null
38.739292
2,924
0.560063
[ [ [ "# Set up environment for GPU (!important)", "_____no_output_____" ] ], [ [ "import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' \nos.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'\n\nimport keras\nimport tensorflow as tf\nprint(\"TF version\", tf.__version__)\nprint(\"keras version:\", keras.__version__)\nphysical_devices = tf.config.list_physical_devices('GPU')\nprint(\"Num GPUs Available: \", len(physical_devices))\n\nif len(physical_devices) > 0:\n tf.config.experimental.set_memory_growth(physical_devices[0], True)\n# If this last line give an error, stop the notebook kernel, reset it and run again", "TF version 2.4.1\nkeras version: 2.4.3\nNum GPUs Available: 1\n" ] ], [ [ "# Train module\n\n## Import required modules to process data", "_____no_output_____" ] ], [ [ "from keras_preprocessing.image import ImageDataGenerator\nimport pandas as pd\nimport numpy as np", "_____no_output_____" ] ], [ [ "## Train configuration", "_____no_output_____" ] ], [ [ "IMAGE_SIZE = 96 # Images are 96x96 px\nIMAGE_CHANNELS = 3 # Images are 3 chanell (RGB)", "_____no_output_____" ] ], [ [ "## Load train data info", "_____no_output_____" ] ], [ [ "# Function to append image file extension to train img ids\ndef appendExt(id):\n return id + \".tif\"\n\n# Load CSVs\ntraindf = pd.read_csv(\"/dataset/train_labels.csv\")\n\n# ========= FOR PROTOTYPING ONLY =========== #\n# traindf = traindf[:100]\n# ========================================== #\n\n# Add extensions to id files\ntraindf[\"id\"] = traindf[\"id\"].apply(appendExt)\n\n# Labels must be strings\ntraindf[\"label\"] = traindf[\"label\"].astype(str)\n\n# removing this image because it caused a training error previously\ntraindf[traindf['id'] != 'dd6dfed324f9fcb6f93f46f32fc800f2ec196be2']\n\n# removing this image because it's black\ntraindf[traindf['id'] != '9369c7278ec8bcc6c880d99194de09fc2bd4efbe']", "_____no_output_____" ] ], [ [ "## Build image data generator", "_____no_output_____" ] ], [ [ "datagen = ImageDataGenerator(rescale=1./255., validation_split=0.25)\n\ntrain_generator=datagen.flow_from_dataframe(\n dataframe = traindf,\n directory = \"/dataset/train/\",\n x_col = \"id\",\n y_col = \"label\",\n subset = \"training\",\n target_size = (IMAGE_SIZE, IMAGE_SIZE),\n batch_size = 10,\n shuffle = True,\n class_mode = \"categorical\",\n)\n\nvalid_generator=datagen.flow_from_dataframe(\n dataframe = traindf,\n directory = \"/dataset/train/\",\n x_col = \"id\",\n y_col = \"label\",\n subset = \"validation\",\n target_size = (IMAGE_SIZE, IMAGE_SIZE),\n batch_size = 10,\n shuffle = True,\n class_mode = \"categorical\",\n)\n", "Found 165019 validated image filenames belonging to 2 classes.\nFound 55006 validated image filenames belonging to 2 classes.\n" ], [ "# Calculate class weigths\nfrom sklearn.utils import class_weight \nclass_weights = class_weight.compute_class_weight(\n 'balanced',\n classes=np.unique(traindf['label']),\n y=traindf['label']\n)\nclass_weights = dict(enumerate(class_weights))\nprint(\"Class weights:\", class_weights)", "Class weights: {0: 0.840380267057781, 1: 1.234472659537462}\n" ] ], [ [ "## Build example model", "_____no_output_____" ] ], [ [ "from keras.models import Sequential\nfrom keras.layers import Dense, Activation, Flatten, Dropout, BatchNormalization\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import regularizers, optimizers\nfrom keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint", "_____no_output_____" ], [ "kernel_size = (3,3)\npool_size= (2,2)\nfirst_filters = 
32\nsecond_filters = 64\nthird_filters = 128\n\ndropout_conv = 0.3\ndropout_dense = 0.3\n\n\nmodel = Sequential()\nmodel.add(Conv2D(first_filters, kernel_size, activation = 'relu', input_shape = (96, 96, 3)))\nmodel.add(Conv2D(first_filters, kernel_size, activation = 'relu'))\nmodel.add(Conv2D(first_filters, kernel_size, activation = 'relu'))\nmodel.add(MaxPooling2D(pool_size = pool_size)) \nmodel.add(Dropout(dropout_conv))\n\nmodel.add(Conv2D(second_filters, kernel_size, activation ='relu'))\nmodel.add(Conv2D(second_filters, kernel_size, activation ='relu'))\nmodel.add(Conv2D(second_filters, kernel_size, activation ='relu'))\nmodel.add(MaxPooling2D(pool_size = pool_size))\nmodel.add(Dropout(dropout_conv))\n\nmodel.add(Conv2D(third_filters, kernel_size, activation ='relu'))\nmodel.add(Conv2D(third_filters, kernel_size, activation ='relu'))\nmodel.add(Conv2D(third_filters, kernel_size, activation ='relu'))\nmodel.add(MaxPooling2D(pool_size = pool_size))\nmodel.add(Dropout(dropout_conv))\n\nmodel.add(Flatten())\nmodel.add(Dense(256, activation = \"relu\"))\nmodel.add(Dropout(dropout_dense))\nmodel.add(Dense(2, activation = \"softmax\"))\n\n\nmodel.compile(\n optimizers.Adam(lr=0.0001), \n loss='categorical_crossentropy', \n metrics=['accuracy']\n)\n\nmodel.summary()", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 94, 94, 32) 896 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 92, 92, 32) 9248 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 90, 90, 32) 9248 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 45, 45, 32) 0 \n_________________________________________________________________\ndropout (Dropout) (None, 45, 45, 32) 0 \n_________________________________________________________________\nconv2d_3 (Conv2D) (None, 43, 43, 64) 18496 \n_________________________________________________________________\nconv2d_4 (Conv2D) (None, 41, 41, 64) 36928 \n_________________________________________________________________\nconv2d_5 (Conv2D) (None, 39, 39, 64) 36928 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 19, 19, 64) 0 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 19, 19, 64) 0 \n_________________________________________________________________\nconv2d_6 (Conv2D) (None, 17, 17, 128) 73856 \n_________________________________________________________________\nconv2d_7 (Conv2D) (None, 15, 15, 128) 147584 \n_________________________________________________________________\nconv2d_8 (Conv2D) (None, 13, 13, 128) 147584 \n_________________________________________________________________\nmax_pooling2d_2 (MaxPooling2 (None, 6, 6, 128) 0 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 6, 6, 128) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 4608) 0 \n_________________________________________________________________\ndense (Dense) (None, 256) 1179904 \n_________________________________________________________________\ndropout_3 (Dropout) (None, 256) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 2) 514 
\n=================================================================\nTotal params: 1,661,186\nTrainable params: 1,661,186\nNon-trainable params: 0\n_________________________________________________________________\n" ] ], [ [ "## Train the example model", "_____no_output_____" ] ], [ [ "STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size\nSTEP_SIZE_VALID = valid_generator.n//valid_generator.batch_size\n\nprint(\"STEP_SIZE_TRAIN:\", STEP_SIZE_TRAIN)\nprint(\"STEP_SIZE_VALID:\", STEP_SIZE_VALID)\n\n# Save best model\ncheckpointPath = \"/usr/src/scripts/best-model.h5\"\ncheckpoint = ModelCheckpoint(\n checkpointPath,\n monitor='val_accuracy',\n verbose=1, \n save_best_only=True,\n mode='max'\n)\n\n# Dynamic learning rate\nreduce_lr = ReduceLROnPlateau(\n monitor='val_accuracy',\n factor=0.5,\n patience=2, \n verbose=1,\n mode='max',\n min_lr=0.00001\n)\n \ncallbacks_list = [checkpoint, reduce_lr]", "STEP_SIZE_TRAIN: 16501\nSTEP_SIZE_VALID: 5500\n" ], [ "model.fit(\n train_generator,\n steps_per_epoch=STEP_SIZE_TRAIN,\n class_weight=class_weights,\n validation_data=valid_generator,\n validation_steps=STEP_SIZE_VALID,\n epochs=10, # Only for test!\n verbose=1,\n callbacks=callbacks_list\n)\n", "Epoch 1/10\n16501/16501 [==============================] - 279s 17ms/step - loss: 0.4872 - accuracy: 0.7675 - val_loss: 0.5604 - val_accuracy: 0.7587\n\nEpoch 00001: val_accuracy improved from -inf to 0.75871, saving model to /usr/src/scripts/best-model.h5\nEpoch 2/10\n16501/16501 [==============================] - 269s 16ms/step - loss: 0.3555 - accuracy: 0.8458 - val_loss: 0.3169 - val_accuracy: 0.8666\n\nEpoch 00002: val_accuracy improved from 0.75871 to 0.86656, saving model to /usr/src/scripts/best-model.h5\nEpoch 3/10\n16501/16501 [==============================] - 257s 16ms/step - loss: 0.3053 - accuracy: 0.8721 - val_loss: 0.2665 - val_accuracy: 0.8878\n\nEpoch 00003: val_accuracy improved from 0.86656 to 0.88782, saving model to /usr/src/scripts/best-model.h5\nEpoch 4/10\n16501/16501 [==============================] - 253s 15ms/step - loss: 0.2698 - accuracy: 0.8913 - val_loss: 0.2903 - val_accuracy: 0.8755\n\nEpoch 00004: val_accuracy did not improve from 0.88782\nEpoch 5/10\n16501/16501 [==============================] - 253s 15ms/step - loss: 0.2514 - accuracy: 0.8993 - val_loss: 0.2367 - val_accuracy: 0.9053\n\nEpoch 00005: val_accuracy improved from 0.88782 to 0.90535, saving model to /usr/src/scripts/best-model.h5\nEpoch 6/10\n16501/16501 [==============================] - 252s 15ms/step - loss: 0.2344 - accuracy: 0.9077 - val_loss: 0.2248 - val_accuracy: 0.9140\n\nEpoch 00006: val_accuracy improved from 0.90535 to 0.91398, saving model to /usr/src/scripts/best-model.h5\nEpoch 7/10\n16501/16501 [==============================] - 253s 15ms/step - loss: 0.2195 - accuracy: 0.9145 - val_loss: 0.1970 - val_accuracy: 0.9259\n\nEpoch 00007: val_accuracy improved from 0.91398 to 0.92585, saving model to /usr/src/scripts/best-model.h5\nEpoch 8/10\n16501/16501 [==============================] - 253s 15ms/step - loss: 0.2096 - accuracy: 0.9191 - val_loss: 0.2133 - val_accuracy: 0.9174\n\nEpoch 00008: val_accuracy did not improve from 0.92585\nEpoch 9/10\n16501/16501 [==============================] - 253s 15ms/step - loss: 0.2055 - accuracy: 0.9209 - val_loss: 0.2120 - val_accuracy: 0.9185\n\nEpoch 00009: val_accuracy did not improve from 0.92585\n\nEpoch 00009: ReduceLROnPlateau reducing learning rate to 4.999999873689376e-05.\nEpoch 10/10\n16501/16501 [==============================] - 
253s 15ms/step - loss: 0.1774 - accuracy: 0.9332 - val_loss: 0.1933 - val_accuracy: 0.9244\n\nEpoch 00010: val_accuracy did not improve from 0.92585\n" ], [ "val_loss, val_accuracy = model.evaluate(valid_generator, steps=STEP_SIZE_VALID)", "5500/5500 [==============================] - 36s 7ms/step - loss: 0.1932 - accuracy: 0.9244\n" ] ], [ [ "## Predict test data", "_____no_output_____" ] ], [ [ "# Load test data\ntestdf = pd.read_csv(\"/dataset/sample_submission.csv\")\ntestdf[\"id\"] = testdf[\"id\"].apply(appendExt)\n\n# Set up test data generator (only apply normalization)\ntest_datagen=ImageDataGenerator(rescale=1./255.)\n\ntest_generator=test_datagen.flow_from_dataframe(\n dataframe=testdf,\n directory=\"/dataset/test/\",\n x_col=\"id\",\n y_col=None,\n batch_size=10,\n shuffle=False,\n class_mode=None,\n target_size=(IMAGE_SIZE,IMAGE_SIZE)\n)\n\n\nSTEP_SIZE_TEST = test_generator.n//test_generator.batch_size\n", "Found 57458 validated image filenames.\n" ], [ "test_generator.reset()\nmodel.predict(test_generator, steps=STEP_SIZE_TEST)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
c52cfbd9e364b867226c36e951b77013799d36da
10090
ipynb
Jupyter Notebook
azure/notebooks/Train-SKLearn.ipynb
ethem-kinginthenorth/cloud-ml-examples
e434d2bdbf2adf058dc436f992a56585537dc8ab
[ "Apache-2.0" ]
102
2020-05-14T14:46:38.000Z
2022-03-18T22:40:36.000Z
azure/notebooks/Train-SKLearn.ipynb
miroenev/cloud-ml-examples
1760ff36f40938afaa71ea6de3db9a512e568ae0
[ "Apache-2.0" ]
105
2020-05-17T12:19:14.000Z
2022-02-28T15:41:38.000Z
azure/notebooks/Train-SKLearn.ipynb
miroenev/cloud-ml-examples
1760ff36f40938afaa71ea6de3db9a512e568ae0
[ "Apache-2.0" ]
72
2020-05-14T21:49:29.000Z
2022-03-03T19:21:36.000Z
27.643836
402
0.597027
[ [ [ "# Train with Scikit-learn on AzureML", "_____no_output_____" ], [ "## Prerequisites", "_____no_output_____" ], [ "* Install the Azure Machine Learning Python SDK and create an Azure ML Workspace", "_____no_output_____" ] ], [ [ "import time\n#check core SDK version\nimport azureml.core\n\nprint(\"SDK version:\", azureml.core.VERSION)", "_____no_output_____" ], [ "# data_dir = '../../data_airline_updated'", "_____no_output_____" ] ], [ [ "## Initialize workspace", "_____no_output_____" ], [ "Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#workspace) object from the existing workspace you created in the Prerequisites step. `Workspace.from_config()` creates a workspace object from the details stored in `config.json`.", "_____no_output_____" ] ], [ [ "from azureml.core.workspace import Workspace\n\n# if a locally-saved configuration file for the workspace is not available, use the following to load workspace\n# ws = Workspace(subscription_id=subscription_id, resource_group=resource_group, workspace_name=workspace_name)\n\nws = Workspace.from_config()\nprint('Workspace name: ' + ws.name, \n 'Azure region: ' + ws.location, \n 'Subscription id: ' + ws.subscription_id, \n 'Resource group: ' + ws.resource_group, sep = '\\n')\n\ndatastore = ws.get_default_datastore()\nprint(\"Default datastore's name: {}\".format(datastore.name))", "_____no_output_____" ], [ "# datastore.upload(src_dir='../../data_airline_updated', target_path='data_airline', overwrite=False, show_progress=True)", "_____no_output_____" ], [ "path_on_datastore = 'data_airline'\nds_data = datastore.path(path_on_datastore)\nprint(ds_data)", "_____no_output_____" ] ], [ [ "## Create AmlCompute", "_____no_output_____" ], [ "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, we use Azure ML managed compute ([AmlCompute](https://docs.microsoft.com/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute)) for our remote training compute resource.\n\nAs with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota.", "_____no_output_____" ], [ "If we could not find the cluster with the given name, then we will create a new cluster here. We will create an `AmlCompute` cluster of `Standard_DS5_v2` GPU VMs.", "_____no_output_____" ] ], [ [ "from azureml.core.compute import ComputeTarget, AmlCompute\nfrom azureml.core.compute_target import ComputeTargetException\n\n#choose a name for your cluster\ncpu_cluster_name = \"cpu-cluster\"\n\nif cpu_cluster_name in ws.compute_targets:\n cpu_cluster = ws.compute_targets[cpu_cluster_name]\n if cpu_cluster and type(cpu_cluster) is AmlCompute:\n print('Found compute target. Will use {0} '.format(cpu_cluster_name))\nelse:\n print(\"creating new cluster\")\n\n provisioning_config = AmlCompute.provisioning_configuration(vm_size = 'Standard_DS5_v2', max_nodes = 1)\n\n #create the cluster\n cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, provisioning_config)\n \n #can poll for a minimum number of nodes and for a specific timeout. 
\n #if no min node count is provided it uses the scale settings for the cluster\n cpu_cluster.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n \n#use get_status() to get a detailed status for the current cluster. \nprint(cpu_cluster.get_status().serialize())", "_____no_output_____" ] ], [ [ "## Train model on the remote compute", "_____no_output_____" ], [ "Now that you have your data and training script prepared, you are ready to train on your remote compute.", "_____no_output_____" ], [ "Create a directory that will contain all the necessary code from your local machine that you will need access to on the remote resource. This includes the training script and any additional files your training script depends on.", "_____no_output_____" ] ], [ [ "import os\n\nproject_folder = './train_sklearn'\nos.makedirs(project_folder, exist_ok=True)", "_____no_output_____" ] ], [ [ "### Prepare training script", "_____no_output_____" ], [ "Now you will need to create your training script. We log the parameters and the highest accuracy the model achieves:\n\n```python\n\nrun.log('Accuracy', np.float(accuracy))\n```\n\nThese run metrics will become particularly important when we begin hyperparameter tuning our model in the \"Tune model hyperparameters\" section.\n\nOnce your script is ready, copy the training script `train_sklearn_RF.py` into your project directory.", "_____no_output_____" ] ], [ [ "import shutil\n\nshutil.copy('train_sklearn_RF.py', project_folder)", "_____no_output_____" ] ], [ [ "### Create an experiment", "_____no_output_____" ], [ "Create an [Experiment](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#experiment) to track all the runs in your workspace.", "_____no_output_____" ] ], [ [ "from azureml.core import Experiment\n\nexperiment_name = 'train_sklearn'\nexperiment = Experiment(ws, name=experiment_name)", "_____no_output_____" ] ], [ [ "### Create a scikit-learn estimator", "_____no_output_____" ] ], [ [ "from azureml.train.sklearn import SKLearn\n\nscript_params = {\n '--data_dir': ds_data.as_mount(),\n '--n_estimators': 100,\n '--max_depth': 8,\n '--max_features': 0.6,\n}\n\nestimator = SKLearn(source_directory=project_folder, \n script_params=script_params,\n compute_target=cpu_cluster,\n entry_script='train_sklearn_RF.py',\n pip_packages=['pyarrow'])", "_____no_output_____" ] ], [ [ "The `script_params` parameter is a dictionary containing the command-line arguments to your training script `entry_script`.", "_____no_output_____" ], [ "### Submit job", "_____no_output_____" ], [ "Run your experiment by submitting your estimator object. Note that this call is asynchronous.", "_____no_output_____" ] ], [ [ "run = experiment.submit(estimator)", "_____no_output_____" ] ], [ [ "## Monitor your run", "_____no_output_____" ], [ "Monitor the progress of the run with a Jupyter widget.The widget is asynchronous and provides live updates every 10-15 seconds until the job completes.", "_____no_output_____" ] ], [ [ "from azureml.widgets import RunDetails\n\nRunDetails(run).show()", "_____no_output_____" ], [ "# run.cancel()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
c52cfd4cc76334028ba20103edf6de75f1ee1889
202964
ipynb
Jupyter Notebook
experiments/experiment_2/20200120_precision_recall.ipynb
neurodata/dos_and_donts
b49a61a8aa29dbde86651bd39c9322f0eb3c0694
[ "BSD-3-Clause" ]
3
2020-05-17T21:56:52.000Z
2020-12-09T04:27:31.000Z
experiments/experiment_2/20200120_precision_recall.ipynb
neurodata/dos_and_donts
b49a61a8aa29dbde86651bd39c9322f0eb3c0694
[ "BSD-3-Clause" ]
2
2020-08-06T04:58:37.000Z
2020-08-06T05:02:37.000Z
experiments/experiment_2/20200120_precision_recall.ipynb
neurodata/dos_and_donts
b49a61a8aa29dbde86651bd39c9322f0eb3c0694
[ "BSD-3-Clause" ]
1
2020-08-12T02:29:11.000Z
2020-08-12T02:29:11.000Z
444.122538
99728
0.927775
[ [ [ "import warnings\nfrom itertools import product\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom graspy.plot import heatmap\nfrom graspy.simulations import er_np, sbm\nfrom graspy.utils import symmetrize\nfrom joblib import Parallel, delayed\nfrom scipy.stats import ttest_ind, wilcoxon, mannwhitneyu, truncnorm\n\nwarnings.filterwarnings(\"ignore\")\n%matplotlib inline", "_____no_output_____" ], [ "def generate_pop(m, var_1, var_2, seed, block_1 = 5, block_2=15):\n np.random.seed(seed)\n\n n = [block_1, block_2]\n p = [\n [1, 1],\n [1, 1]\n ]\n sd_1 = np.sqrt(var_1)\n sd_2 = np.sqrt(var_2)\n \n wt_func = [\n [truncnorm.rvs, truncnorm.rvs],\n [truncnorm.rvs, truncnorm.rvs]\n ]\n \n wt_args_1 = [\n [dict(a=-1/sd_1, b=1/sd_1, scale=sd_1, random_state=seed), dict(a=-1/sd_1, b=1/sd_1, scale=sd_1, random_state=seed)],\n [dict(a=-1/sd_1, b=1/sd_1, scale=sd_1, random_state=seed), dict(a=-1/sd_1, b=1/sd_1, scale=sd_1, random_state=seed)],\n ]\n\n wt_args_2 = [\n [dict(a=-1/sd_2, b=1/sd_2, scale=sd_2, random_state=seed), dict(a=-1/sd_2, b=1/sd_2, scale=sd_2, random_state=seed)],\n [dict(a=-1/sd_2, b=1/sd_2, scale=sd_2, random_state=seed), dict(a=-1/sd_2, b=1/sd_2, scale=sd_2, random_state=seed)],\n ]\n \n pop_1 = np.array([sbm(n, p, wt=wt_func, wtargs=wt_args_1) for _ in range(m)])\n pop_2 = np.array([sbm(n, p, wt=wt_func, wtargs=wt_args_2) for _ in range(m)])\n \n return pop_1, pop_2\n\ndef compute_statistic(test, pop1, pop2):\n if test.__name__ == 'ttest_ind':\n test_statistics, _ = ttest_ind(pop1, pop2, axis=0)\n np.nan_to_num(test_statistics, copy=False)\n else:\n n = pop1.shape[-1]\n test_statistics = np.zeros((n, n))\n \n for i in range(n):\n for j in range(i, n):\n x_ij = pop1[:, i, j]\n y_ij = pop2[:, i, j]\n\n if np.array_equal(x_ij, y_ij):\n test_statistics[i, j] = 0\n else:\n tmp, pval = test(x_ij, y_ij)\n test_statistics[i, j] = tmp\n \n test_statistics = symmetrize(test_statistics)\n \n return test_statistics\n\ndef compute_pr_at_k(different_n, k, test_statistics, test):\n n = test_statistics.shape[0]\n labels = np.zeros((n, n))\n labels[0:different_n, 0:different_n] = 1\n \n triu_idx = np.triu_indices_from(test_statistics, k=1)\n test_statistics_ = np.abs(test_statistics[triu_idx])\n labels_ = labels[triu_idx]\n \n if test.__name__ == 'ttest_ind':\n idx = np.argsort(test_statistics_)[::-1]\n else:\n idx = np.argsort(test_statistics_)\n sorted_labels = labels_[idx]\n \n precision_at_k = sorted_labels[:k].mean()\n recall_at_k = sorted_labels[:k].sum() / sorted_labels.sum()\n \n return precision_at_k, recall_at_k\n\ndef compute_trustworthiness(pvals):\n idx = np.triu_indices(pvals.shape[0], k=1)\n res = pvals[idx]\n \n fraction_correct = (res <=0.05).mean()\n all_correct = np.all(res <= 0.05)\n \n return fraction_correct, all_correct", "_____no_output_____" ], [ "def run_experiment(m, var_1, var_2, seed, reps):\n tests = ttest_ind, wilcoxon, mannwhitneyu\n \n precisions = []\n recalls = []\n \n for i in range(reps):\n tmp_precisions = []\n tmp_recalls = [] \n pop1, pop2 = generate_pop(m=m, var_1=var_1, var_2=var_2, seed = seed+i)\n \n for test in tests:\n test_statistics = compute_statistic(test, pop1, pop2)\n \n for k in range(1, 11):\n precision, recall = compute_pr_at_k(5, k, test_statistics, test)\n tmp_precisions.append(precision)\n tmp_recalls.append(recall)\n\n precisions.append(tmp_precisions)\n recalls.append(tmp_recalls)\n \n precisions = np.array(precisions).mean(axis=0)\n recalls = np.array(recalls).mean(axis=0)\n \n 
to_append = [var_1, var_2, m, *precisions, *recalls]\n\n return to_append", "_____no_output_____" ], [ "spacing = 50\ndelta = 0.05\n\nvar_1s = np.linspace(1, 3, spacing)\nvar_2s = np.ones(spacing) \nms = np.linspace(0, 500, spacing +1).astype(int)[1:]\nreps=100", "_____no_output_____" ], [ "args = [\n (m, var_1, var_2, seed*reps, reps) \n for seed, (m, (var_1, var_2)) \n in enumerate(product(ms, zip(var_1s, var_2s)))\n]\n\nres = Parallel(n_jobs=-3, verbose=1)(\n delayed(run_experiment)(\n *arg\n ) for arg in args\n)", "[Parallel(n_jobs=-3)]: Using backend LokyBackend with 54 concurrent workers.\n[Parallel(n_jobs=-3)]: Done 92 tasks | elapsed: 1.5min\n[Parallel(n_jobs=-3)]: Done 342 tasks | elapsed: 9.6min\n[Parallel(n_jobs=-3)]: Done 692 tasks | elapsed: 31.0min\n[Parallel(n_jobs=-3)]: Done 1142 tasks | elapsed: 75.5min\n[Parallel(n_jobs=-3)]: Done 1692 tasks | elapsed: 156.4min\n[Parallel(n_jobs=-3)]: Done 2342 tasks | elapsed: 290.4min\n[Parallel(n_jobs=-3)]: Done 2500 out of 2500 | elapsed: 323.8min finished\n" ], [ "cols = [\n 'var1',\n 'var2',\n 'm', \n *[f\"{test.__name__}_precision_at_{k}\" for test in [ttest_ind, wilcoxon, mannwhitneyu] for k in range(1, 11)], \n *[f\"{test.__name__}_recall_at_{k}\" for test in [ttest_ind, wilcoxon, mannwhitneyu] for k in range(1, 11)]]\nres_df = pd.DataFrame(res, columns = cols) ", "_____no_output_____" ], [ "res_df.to_csv(\"20200120_precision_recall_results.csv\", index=False)", "_____no_output_____" ] ], [ [ "# Figures", "_____no_output_____" ] ], [ [ "size = np.sqrt(res_df.shape[0]).astype(int)\n\nttest_prec = np.flipud(res_df.ttest_ind_precision_at_10.values.reshape(-1, spacing))\nwilcoxon_prec = np.flipud(res_df.wilcoxon_precision_at_10.values.reshape(-1, spacing))\nmannwhitney_prec = np.flipud(res_df.mannwhitneyu_precision_at_10.values.reshape(-1, spacing))\n\nsamples = np.linspace(0, 500, spacing +1).astype(int)[1:] *2\nsamples = [str(i) for i in samples]\n\nvmin = -.5\nvmax = -vmin\n\n\nfmt = lambda x: \"{:.2f}\".format(x)\nwith sns.plotting_context('talk', font_scale=1.25):\n # fig, ax = plt.subplots(figsize=(10, 10))\n fig, ax = plt.subplots(1, 4, gridspec_kw={'width_ratios': [1, 1, 1, 0.05]}, figsize=(20, 8))\n \n sns.heatmap(\n ttest_prec - wilcoxon_prec,\n ax = ax[0],\n square=True,\n center=0,\n cmap=\"RdBu_r\",\n cbar_kws = dict(shrink=0.7),\n xticklabels=[f\"{mu1:.02f}\" for mu1 in var_1s],\n yticklabels=[f\"{int(m*2)}\" for m in ms][::-1],\n cbar_ax=ax[-1],\n vmin=vmin,\n vmax=vmax\n )\n #ax[0].set_xticks(np.arange(0, ax[0].get_xlim()[1]+1, 10))\n #ax[0].set_yticks(np.arange(0, ax[0].get_ylim()[0]+1, 10)[::-1])\n ax[0].set_title(\"T-Test - Wilcoxon\")\n \n sns.heatmap(\n ttest_prec - mannwhitney_prec,\n ax = ax[1],\n square=True,\n center=0,\n cmap=\"RdBu_r\",\n cbar_kws = dict(shrink=0.7),\n xticklabels=[f\"{mu1:.02f}\" for mu1 in var_1s],\n cbar_ax=ax[-1],\n vmin=vmin,\n vmax=vmax\n )\n #ax[1].set_xticks(np.arange(0, ax[1].get_xlim()[1]+1, 10))\n #ax[1].set_yticks(np.arange(0, ax[1].get_ylim()[0]+1, 10)[::-1])\n ax[1].yaxis.set_major_formatter(plt.NullFormatter())\n ax[1].set_title(\"T-Test - Mann-Whitney\")\n \n sns.heatmap(\n wilcoxon_prec - mannwhitney_prec,\n ax = ax[2],\n square=True,\n center=0,\n cmap=\"RdBu_r\",\n cbar_kws = dict(shrink=0.7),\n xticklabels=[f\"{mu1:.02f}\" for mu1 in var_2s],\n cbar_ax=ax[-1],\n vmin=vmin,\n vmax=vmax\n )\n #ax[2].set_xticks(np.arange(0, ax[1].get_xlim()[1]+1, 10))\n #ax[2].set_yticks(np.arange(0, ax[1].get_ylim()[0]+1, 10)[::-1])\n ax[2].yaxis.set_major_formatter(plt.NullFormatter())\n 
ax[2].set_title(\"Wilcoxon - Mann-Whitney\")\n\n fig.text(-0.01, 0.5, \"Sample Size\", va='center', rotation='vertical')\n fig.text(0.5, -0.03, \"Mu\", va='center', ha='center')\n \n fig.tight_layout()\n \n #fig.savefig(\"./figures/20191209_precision_diff.png\", dpi=300, bbox_inches='tight')\n #fig.savefig(\"./figures/20191209_precision_diff.pdf\", dpi=300, bbox_inches='tight')", "_____no_output_____" ], [ "size = np.sqrt(res_df.shape[0]).astype(int)\n\nttest_prec = np.flipud(res_df.ttest_ind_precision_at_10.values.reshape(-1, spacing))\nwilcoxon_prec = np.flipud(res_df.wilcoxon_precision_at_10.values.reshape(-1, spacing))\nmannwhitney_prec = np.flipud(res_df.mannwhitneyu_precision_at_10.values.reshape(-1, spacing))\n\nsamples = np.arange(0, 501, spacing)[1:] *2\nsamples[0] += 10\nsamples = [str(i) for i in samples]\n\nvmin = -0.05\nvmax = -vmin\n\n\nfmt = lambda x: \"{:.2f}\".format(x)\nwith sns.plotting_context('talk', font_scale=1.25):\n # fig, ax = plt.subplots(figsize=(10, 10))\n fig, ax = plt.subplots(1, 4, gridspec_kw={'width_ratios': [1, 1, 1, 0.05]}, figsize=(20, 8))\n \n sns.heatmap(\n ttest_prec - wilcoxon_prec,\n ax = ax[0],\n square=True,\n center=0,\n cmap=\"RdBu_r\",\n cbar_kws = dict(shrink=0.7),\n xticklabels=[f\"{mu1:.02f}\" for mu1 in var_1s],\n yticklabels=[f\"{int(m*2)}\" for m in ms][::-1],\n cbar_ax=ax[-1],\n vmin=vmin,\n vmax=vmax\n )\n #ax[0].set_xticks(np.arange(0, ax[0].get_xlim()[1]+1, 10))\n #ax[0].set_yticks(np.arange(0, ax[0].get_ylim()[0]+1, 10)[::-1])\n ax[0].set_title(\"T-Test - Wilcoxon\")\n \n sns.heatmap(\n ttest_prec - mannwhitney_prec,\n ax = ax[1],\n square=True,\n center=0,\n cmap=\"RdBu_r\",\n cbar_kws = dict(shrink=0.7),\n xticklabels=[f\"{mu1:.02f}\" for mu1 in var_1s],\n cbar_ax=ax[-1],\n vmin=vmin,\n vmax=vmax\n )\n #ax[1].set_xticks(np.arange(0, ax[1].get_xlim()[1]+1, 10))\n #ax[1].set_yticks(np.arange(0, ax[1].get_ylim()[0]+1, 10)[::-1])\n ax[1].yaxis.set_major_formatter(plt.NullFormatter())\n ax[1].set_title(\"T-Test - Mann-Whitney\")\n \n sns.heatmap(\n wilcoxon_prec - mannwhitney_prec,\n ax = ax[2],\n square=True,\n center=0,\n cmap=\"RdBu_r\",\n cbar_kws = dict(shrink=0.7),\n xticklabels=[f\"{mu1:.02f}\" for mu1 in var_2s],\n cbar_ax=ax[-1],\n vmin=vmin,\n vmax=vmax\n )\n #ax[2].set_xticks(np.arange(0, ax[1].get_xlim()[1]+1, 10))\n #ax[2].set_yticks(np.arange(0, ax[1].get_ylim()[0]+1, 10)[::-1])\n ax[2].yaxis.set_major_formatter(plt.NullFormatter())\n ax[2].set_title(\"Wilcoxon - Mann-Whitney\")\n\n fig.text(-0.01, 0.5, \"Sample Size\", va='center', rotation='vertical')\n fig.text(0.5, -0.03, \"Mu\", va='center', ha='center')\n \n fig.tight_layout()\n \n #fig.savefig(\"./figures/20191209_precision_diff.png\", dpi=300, bbox_inches='tight')\n #fig.savefig(\"./figures/20191209_precision_diff.pdf\", dpi=300, bbox_inches='tight')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
c52d128b3acc85e54a1b59cd28dfc379006fcc1b
873192
ipynb
Jupyter Notebook
ML Notebooks/Logistic Regression.ipynb
Sahil-Chavan/ML_Playground
cd6b12db7f64e58aae88d7672343aa0406347bb1
[ "Unlicense" ]
null
null
null
ML Notebooks/Logistic Regression.ipynb
Sahil-Chavan/ML_Playground
cd6b12db7f64e58aae88d7672343aa0406347bb1
[ "Unlicense" ]
9
2020-09-30T20:07:30.000Z
2021-02-21T18:39:16.000Z
ML Notebooks/Logistic Regression.ipynb
Sahil-Chavan/ML_Playground
cd6b12db7f64e58aae88d7672343aa0406347bb1
[ "Unlicense" ]
null
null
null
54.21869
16144
0.507087
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport seaborn as sns", "_____no_output_____" ] ], [ [ "print('sdjhjfvsjdf')", "sdjhjfvsjdf\n" ], [ "df_a = pd.read_csv('../datafiles/titanic_train.csv')\ndf_b = pd.read_csv('../datafiles/titanic_test.csv')\nprint(df_a.shape,df_b.shape)", "(891, 12) (418, 11)\n" ], [ "df_b", "_____no_output_____" ], [ "df_a.head()", "_____no_output_____" ], [ "df_a.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 891 entries, 0 to 890\nData columns (total 12 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 PassengerId 891 non-null int64 \n 1 Survived 891 non-null int64 \n 2 Pclass 891 non-null int64 \n 3 Name 891 non-null object \n 4 Sex 891 non-null object \n 5 Age 714 non-null float64\n 6 SibSp 891 non-null int64 \n 7 Parch 891 non-null int64 \n 8 Ticket 891 non-null object \n 9 Fare 891 non-null float64\n 10 Cabin 204 non-null object \n 11 Embarked 889 non-null object \ndtypes: float64(2), int64(5), object(5)\nmemory usage: 83.7+ KB\n" ], [ "# df_a.drop('PassengerId',1, None,None,None,True)", "_____no_output_____" ], [ "df_a", "_____no_output_____" ], [ "df_a['Survived'].hist(by=df_a['Sex'])", "_____no_output_____" ], [ "sns.countplot(x='Survived',hue='Sex',data=df_a)", "_____no_output_____" ], [ "sns.countplot(x='Survived',hue='Pclass',data=df_a)", "_____no_output_____" ], [ "sns.distplot(a=df_a['Survived'])", "_____no_output_____" ], [ "fig,axes = plt.subplots()\naxes.scatter(x='Survived',y='Sex',data=df_a)", "_____no_output_____" ] ], [ [ "Correlation", "_____no_output_____" ] ], [ [ "df_a.corr()", "_____no_output_____" ], [ "sns.heatmap(data = df_a.corr())", "_____no_output_____" ] ], [ [ "Concatenating the dataset", "_____no_output_____" ] ], [ [ "Y=df_a['Survived'].copy()\ndf_a_temp =df_a.drop('Survived',1)\nful = pd.concat([df_a_temp,df_b])\nful.reset_index(inplace=True)\nful", "_____no_output_____" ] ], [ [ "missing value percentage", "_____no_output_____" ] ], [ [ "(ful.isna().sum() / ful.shape[0])*100", "_____no_output_____" ] ], [ [ "Filling age with the avg from respective 'Pclass' attribut", "_____no_output_____" ] ], [ [ "ful.groupby('Pclass')['Age'].apply(lambda x:print(x))", "1 38.0\n3 35.0\n6 54.0\n11 58.0\n23 28.0\n ... \n1294 17.0\n1295 43.0\n1298 50.0\n1302 37.0\n1305 39.0\nName: 1, Length: 323, dtype: float64\n9 14.0\n15 55.0\n17 NaN\n20 35.0\n21 34.0\n ... \n1278 57.0\n1284 47.0\n1292 38.0\n1296 20.0\n1297 23.0\nName: 2, Length: 277, dtype: float64\n0 22.0\n2 26.0\n4 35.0\n5 NaN\n7 2.0\n ... 
\n1303 28.0\n1304 NaN\n1306 38.5\n1307 NaN\n1308 NaN\nName: 3, Length: 709, dtype: float64\n" ], [ "ful['Age'] = ful.groupby('Pclass')['Age'].transform(lambda x:x.fillna(x.mean()))\nful", "_____no_output_____" ] ], [ [ "dropping the 'Cabin' column as it was wy more missing values and also its categories done think matter much", "_____no_output_____" ] ], [ [ "ful.drop(['Cabin'],axis=1,inplace=True);", "_____no_output_____" ], [ "ful", "_____no_output_____" ] ], [ [ "check for missing values", "_____no_output_____" ], [ "Analysing the relation between the Fare and the Embarked to fill the embarked missing values", "_____no_output_____" ] ], [ [ "ful[ful['Fare']>300 ]", "_____no_output_____" ], [ "fig = plt.figure(figsize=(7,10))\nsns.boxplot(x='Embarked',y='Fare',data=ful,showfliers = False)", "_____no_output_____" ], [ "ful.groupby('Embarked')['Fare'].median()", "_____no_output_____" ], [ "ful.groupby('Embarked')['Fare'].mean()", "_____no_output_____" ] ], [ [ "Function to sort the values of the Fare according to the Embarked attribute(whichh is the destination of boarding)", "_____no_output_____" ] ], [ [ "def impute_embarked(data):\n if(pd.isna(data[1])):\n if(0<data[0]<=20):\n return 'Q'\n elif(20<data[0]<=45):\n return 'S'\n else:\n return 'C'\n else:\n return data[1]", "_____no_output_____" ], [ "ful[ful['Embarked'].isna()]", "_____no_output_____" ], [ "ful['Embarked']=ful[['Fare','Embarked']].apply(impute_embarked,axis=1)", "_____no_output_____" ], [ "ful.iloc[61]", "_____no_output_____" ], [ "(ful.isna().sum() / ful.shape[0])*100", "_____no_output_____" ], [ "ful[ful['Fare'].isna()]", "_____no_output_____" ] ], [ [ "as you can see that the Embarked is 'S' so i am goung to directly put 27 as the mean() in its category \nyou can also leave some missong vaues missing : )", "_____no_output_____" ] ], [ [ "ful.loc[1043,'Fare'] = 27\nful.iloc[1043]['Fare']", "_____no_output_____" ], [ "(ful.isna().sum() / ful.shape[0])*100", "_____no_output_____" ] ], [ [ "We are good to go !!!!! keeping relevent data", "_____no_output_____" ] ], [ [ "ful", "_____no_output_____" ], [ "ful.drop(columns=['index','PassengerId','Name','Ticket'], inplace=True)", "_____no_output_____" ] ], [ [ "Procedure to convet the cateorical variables", "_____no_output_____" ] ], [ [ "print(ful)\nprint(ful.nunique())", "Pclass Sex Age SibSp Parch Fare Embarked\n0 3 male 22.000000 1 0 7.2500 S\n1 1 female 38.000000 1 0 71.2833 C\n2 3 female 26.000000 0 0 7.9250 S\n3 1 female 35.000000 1 0 53.1000 S\n4 3 male 35.000000 0 0 8.0500 S\n... ... ... ... ... ... ... 
...\n1304 3 male 24.816367 0 0 8.0500 S\n1305 1 female 39.000000 0 0 108.9000 C\n1306 3 male 38.500000 0 0 7.2500 S\n1307 3 male 24.816367 0 0 8.0500 S\n1308 3 male 24.816367 1 1 22.3583 C\n\n[1309 rows x 7 columns]\nPclass 3\nSex 2\nAge 101\nSibSp 7\nParch 8\nFare 281\nEmbarked 3\ndtype: int64\n" ], [ "ful", "_____no_output_____" ], [ "from sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import OneHotEncoder\ncolt = ColumnTransformer(transformers=[('encoder',OneHotEncoder(),['Pclass','Sex','SibSp','Parch','Embarked'])])", "_____no_output_____" ], [ "X= pd.DataFrame(colt.fit_transform(ful).toarray())", "_____no_output_____" ], [ "X", "_____no_output_____" ], [ "X[['Age','Fare']]= ful[['Age','Fare']].copy()", "_____no_output_____" ], [ "X", "_____no_output_____" ], [ "X_train_csv=X.loc[:890].copy()\nX_train_csv", "_____no_output_____" ] ], [ [ "> ## All the var with X_test_csv is the data set to be used to submit to the kaggle\n### And should not be used in any sort of training,, we further split the X_train_csv to train and test set to obtain the precision", "_____no_output_____" ] ], [ [ "X_test_csv=X.loc[891:].copy()\nX_test_csv", "_____no_output_____" ] ], [ [ "Now see we have combined the training and test set from the Kaggle Titanic set\n#### But if we want to evauate the model we need to use the Training_csv data and split into the train and test as i the Test_csv data we do not have Y/ Dependent Variable", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X_train_csv, Y, test_size=0.30, random_state=101)\nprint(X_train.shape,X_test.shape, y_train.shape, y_test.shape)", "(623, 25) (268, 25) (623,) (268,)\n" ], [ "from sklearn.linear_model import LogisticRegression\nlogmodel = LogisticRegression(max_iter=350)\nlogmodel.fit(X_train,y_train)", "_____no_output_____" ], [ "predictions = logmodel.predict(X_test)", "_____no_output_____" ], [ "from sklearn.metrics import classification_report\nprint(classification_report(y_test,predictions))", "precision recall f1-score support\n\n 0 0.78 0.88 0.83 154\n 1 0.81 0.66 0.72 114\n\n accuracy 0.79 268\n macro avg 0.79 0.77 0.78 268\nweighted avg 0.79 0.79 0.78 268\n\n" ] ], [ [ "##### <font color='red'> we got the prediction but now we have to see how to increase the precision \n", "_____no_output_____" ], [ "> ### TRY 2)so now we will first try creating the dummy variable using pandas...........................................................................................................", "_____no_output_____" ] ], [ [ "X2 = pd.get_dummies(data = ful,columns=['Pclass','Sex','SibSp','Parch','Embarked'],drop_first=True)\nX2", "_____no_output_____" ] ], [ [ "splittint the full set into train and test", "_____no_output_____" ] ], [ [ "X_train_csv_dummybypandas = X2.loc[:890].copy()\nX_test_csv_dummybypandas = X2.loc[891:].copy()\nprint(X_train_csv_dummybypandas.shape,X_test_csv_dummybypandas.shape)", "(891, 20) (418, 20)\n" ], [ "X_train2, X_test2, y_train2, y_test2 = train_test_split(X_train_csv_dummybypandas, Y, test_size=0.30, random_state=101)\nprint(X_train2.shape,X_test2.shape, y_train2.shape, y_test2.shape)", "(623, 20) (268, 20) (623,) (268,)\n" ], [ "logmodel2 = LogisticRegression(max_iter=350)\nlogmodel2.fit(X_train2,y_train2)", "_____no_output_____" ], [ "predictions2 = logmodel2.predict(X_test2)\nprint(classification_report(y_test2,predictions2))\nprint(logmodel2.coef_)", "precision recall f1-score support\n\n 0 0.77 0.88 0.82 
154\n 1 0.80 0.65 0.72 114\n\n accuracy 0.78 268\n macro avg 0.79 0.77 0.77 268\nweighted avg 0.79 0.78 0.78 268\n\n[[-0.03611679 0.00439679 -0.67994966 -1.82629548 -2.50286423 0.29028611\n -0.06300864 -0.85241279 -0.83509974 -0.62000172 -0.71871227 0.18897612\n -0.49676233 0.43648659 -0.34829709 -0.30681595 -0.38252244 0.\n -0.00506187 -0.46302055]]\n" ] ], [ [ "># <font color='red'>so we can say that getting dummy variables from pandas or scikit learn does not matter use any one", "_____no_output_____" ], [ "> ### TRY 3)Now we try converting only Sex and Embarkment to dummy as done by Portilia.", "_____no_output_____" ] ], [ [ "X3 = pd.get_dummies(data = ful,columns=['Sex','Embarked'],drop_first=True)\nX3", "_____no_output_____" ], [ "X_train_csv_3= X3.loc[:890].copy()\nprint(X_train_csv_3.shape)\nX_train3, X_test3, y_train3, y_test3 = train_test_split(X_train_csv_3, Y, test_size=0.30,random_state=101)\nprint(X_train3.shape,X_test3.shape, y_train3.shape, y_test3.shape)\nlogmodel3 = LogisticRegression(max_iter=350)\nlogmodel3.fit(X_train3,y_train3)\npredictions3 = logmodel3.predict(X_test3)\nprint(classification_report(y_test3,predictions3))", "(891, 8)\n(623, 8) (268, 8) (623,) (268,)\n precision recall f1-score support\n\n 0 0.78 0.88 0.83 154\n 1 0.80 0.68 0.73 114\n\n accuracy 0.79 268\n macro avg 0.79 0.78 0.78 268\nweighted avg 0.79 0.79 0.79 268\n\n" ] ], [ [ "># <font color='red'>so we can say that getting dummy variables from pandas or scikit learn does not matter use any one", "_____no_output_____" ], [ "#### so now we will try to map 'Age' like portilia", "_____no_output_____" ] ], [ [ "ful2 = pd.concat([df_a,df_b])\nful2.reset_index(inplace=True)\nful2", "_____no_output_____" ], [ "def impute_age(cols):\n Age = cols[0]\n Pclass = cols[1]\n \n if pd.isnull(Age):\n\n if Pclass == 1:\n return 37\n\n elif Pclass == 2:\n return 29\n\n else:\n return 24\n\n else:\n return Age\nful2['Age'] = ful2[['Age','Pclass']].apply(impute_age,axis=1)\nful2.isna().sum()", "_____no_output_____" ], [ "def impute_embarked(data):\n if(pd.isna(data[1])):\n if(0<data[0]<=20):\n return 'Q'\n elif(20<data[0]<=45):\n return 'S'\n else:\n return 'C'\n else:\n return data[1]\n# ful2['Embarked']=ful2[['Fare','Embarked']].apply(impute_embarked,axis=1)\nful2.drop('Cabin',axis=1,inplace=True)\nful2['Embarked'].dropna(inplace=True)\nful2['Fare'].dropna(inplace=True)\nful2.isna().sum()", "_____no_output_____" ], [ "# ful2.loc[1043,'Fare'] = 27\n# ful2.drop(columns=['index','PassengerId','Name','Ticket','Cabin'], inplace=True)\nful2.drop(columns=['index','Name','Ticket'], inplace=True)\nful2.shape", "_____no_output_____" ], [ "X4 = pd.get_dummies(data = ful2,columns=['Sex','Embarked'],drop_first=True)\nX_train_csv_4= X4.loc[:890].copy()\nprint(X_train_csv_4.shape)\nX_train4, X_test4, y_train4, y_test4 = train_test_split(X_train_csv_4.drop('Survived',axis=1), Y, test_size=0.30,random_state=101)\nprint(X_train4.shape,X_test4.shape, y_train4.shape, y_test4.shape)\nlogmodel4 = LogisticRegression(max_iter=1000)\nlogmodel4.fit(X_train4,y_train4)\npredictions4 = logmodel4.predict(X_test4)\nprint(classification_report(y_test4,predictions4))\nlogmodel4.coef_", "(891, 10)\n(623, 9) (268, 9) (623,) (268,)\n precision recall f1-score support\n\n 0 0.78 0.87 0.82 154\n 1 0.79 0.68 0.73 114\n\n accuracy 0.79 268\n macro avg 0.79 0.77 0.78 268\nweighted avg 0.79 0.79 0.78 268\n\n" ] ], [ [ "# Here i tried to maximum imitate the portilia's method ", "_____no_output_____" ], [ "# <font color='blue'> Now we are going to try the 
Andrew NG's method", "_____no_output_____" ], [ "- Hypothesis function (not needed in the actual implementation)", "_____no_output_____" ] ], [ [ "# teta = pd.Series(np.zeros(21))\n# def H_teta(X,teta):\n# return 1/(1+np.exp(np.dot(X,teta)))\n# predictions = H_teta(X_train2,teta)", "_____no_output_____" ] ], [ [ "- Standardising the Age & Fare columns ", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import StandardScaler\nX_anng = X_train2.copy()\nX_anng[['Fare', 'Age']] = StandardScaler().fit_transform(X_anng[['Fare', 'Age']])", "_____no_output_____" ] ], [ [ "- Adding x0 in X_train2", "_____no_output_____" ] ], [ [ "# y=y_train2.to_numpy(copy=True)\n# print(y,y.reshape(len(y),1))\n# X_anng['const'] = 1\nX_anng.insert(loc=0, column='const', value=1)\nX_anng", "_____no_output_____" ] ], [ [ "- Cost function", "_____no_output_____" ] ], [ [ "def Cost(x,y,teta):\n pred = 1/(1+np.exp(-np.dot(x,teta)))\n m=len(y)\n err = (-y*np.log(pred))-((1-y)*np.log(1-pred))\n gradient = (1/m)*np.dot(x.transpose(),(pred-y))\n return (1/m)*sum(err),gradient", "_____no_output_____" ] ], [ [ "- Gradient Descent Function", "_____no_output_____" ] ], [ [ "teta = pd.Series(np.zeros(21))\ndef gradDes(x,y,teta,alpha,n):\n m=len(y)\n j_old=[]\n for i in range(n):\n cost, grad = Cost(x,y,teta)\n teta = teta - (alpha * grad)\n j_old.append(cost)\n return cost,j_old,teta\ncost,j,teta = gradDes(X_anng,y_train2,teta,1,500)", "_____no_output_____" ] ], [ [ "- Plotting the Cost vs Iterations graph", "_____no_output_____" ] ], [ [ "fig,ax = plt.subplots()\nax.scatter(np.arange(len(j)),j);", "_____no_output_____" ] ], [ [ "> ## Regularisation (Andrew NG)", "_____no_output_____" ] ], [ [ "teta2 = pd.Series(np.zeros(21)) \ndef regAN(x,y,teta,alpha,lmda,n):\n m=len(y)\n j_old=[]\n coo = []\n for i in range(n):\n cost, grad = Cost(x,y,teta)\n# print(grad.shape)\n teta[0] = teta[0] - (alpha * grad[0])\n teta.loc[1:20] = teta.loc[1:20]*(1-((alpha*lmda)/m)) - (alpha * grad[1:])\n cooo = 1-((alpha*lmda)/m)\n coo.append(cooo)\n j_old.append(cost)\n return cost,j_old,teta,coo\ncost2,j2,teta2,coo2 = regAN(X_anng,y_train2,teta2,1,1,500)", "_____no_output_____" ], [ "fig,ax = plt.subplots()\nax.scatter(np.arange(len(j2)),j2);", "_____no_output_____" ], [ "regularisation_comparison = pd.DataFrame({'teta':teta,'teta2':teta2})\nregularisation_comparison.head(100)", "_____no_output_____" ] ], [ [ "- ## Gridsearch Cross-validation", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import GridSearchCV\ngscv = GridSearchCV(LogisticRegression(),{'C':[10**-4,10**-2,10**0,10**2,10**4]},cv=5,scoring='f1')\ngscv.fit(X_train,y_train)\nprint(gscv.best_estimator_)\nprint(gscv.score(X_test,y_test))", "LogisticRegression(C=100, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, l1_ratio=None, max_iter=100,\n multi_class='auto', n_jobs=None, penalty='l2',\n random_state=None, solver='lbfgs', tol=0.0001, verbose=0,\n warm_start=False)\n0.7307692307692306\n" ], [ "gscv2 = GridSearchCV(LogisticRegression(),{'C':[50,100,200,500,1000,5000]},cv=5,scoring='f1')\ngscv2.fit(X_train,y_train)\nprint(gscv2.best_estimator_)\nprint(gscv2.score(X_test,y_test)) ### if you can see the f1score actually drops", "LogisticRegression(C=500, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, l1_ratio=None, max_iter=100,\n multi_class='auto', n_jobs=None, penalty='l2',\n random_state=None, solver='lbfgs', tol=0.0001, verbose=0,\n warm_start=False)\n0.7184466019417476\n" ], [ "gscv3 = 
GridSearchCV(LogisticRegression(),{'C':[545,550,555]},cv=5,scoring='f1')\ngscv3.fit(X_train,y_train)\nprint(gscv3.best_estimator_)\nprint(gscv3.score(X_test,y_test)) ### this is the best f1 score we achive at 5-fold", "LogisticRegression(C=550, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, l1_ratio=None, max_iter=100,\n multi_class='auto', n_jobs=None, penalty='l2',\n random_state=None, solver='lbfgs', tol=0.0001, verbose=0,\n warm_start=False)\n0.7378640776699029\n" ], [ "gscv4 = GridSearchCV(LogisticRegression(),{'C':[0.0001,0.001,0.01,0.1,1,100,1000,10000]},cv=10,scoring='f1')\ngscv4.fit(X_train,y_train)\nprint(gscv4.best_estimator_)\nprint(gscv4.score(X_test,y_test)) ### at 10 k-fold", "LogisticRegression(C=1, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, l1_ratio=None, max_iter=100,\n multi_class='auto', n_jobs=None, penalty='l2',\n random_state=None, solver='lbfgs', tol=0.0001, verbose=0,\n warm_start=False)\n0.7281553398058253\n" ] ], [ [ "- ## Randomsearch Cross-validation", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import RandomizedSearchCV\nfrom scipy.stats import uniform,norm\ndistributions = dict(C=uniform(loc=0, scale=10),penalty=['l2', 'l1'])\nrscv = RandomizedSearchCV(LogisticRegression(), distributions,)\nrscv.fit(X_train,y_train)", "_____no_output_____" ], [ "print(rscv.best_estimator_)\nprint(rscv.score(X_test,y_test))", "LogisticRegression(C=9.175306049254385, class_weight=None, dual=False,\n fit_intercept=True, intercept_scaling=1, l1_ratio=None,\n max_iter=100, multi_class='auto', n_jobs=None, penalty='l2',\n random_state=None, solver='lbfgs', tol=0.0001, verbose=0,\n warm_start=False)\n0.7910447761194029\n" ], [ "distributions2 = dict(C=norm(),penalty=['l2', 'l1'])\nrscv2 = RandomizedSearchCV(LogisticRegression(), distributions,)\nrscv2.fit(X_train,y_train)", "_____no_output_____" ], [ "print(rscv2.best_estimator_)\nprint(rscv2.score(X_test,y_test))", "LogisticRegression(C=9.979590644335957, class_weight=None, dual=False,\n fit_intercept=True, intercept_scaling=1, l1_ratio=None,\n max_iter=100, multi_class='auto', n_jobs=None, penalty='l2',\n random_state=None, solver='lbfgs', tol=0.0001, verbose=0,\n warm_start=False)\n0.7910447761194029\n" ], [ "plt.hist(norm.rvs(size=1000))", "_____no_output_____" ] ], [ [ ">> ## Checking how sparcity works ", "_____no_output_____" ] ], [ [ "li = [0.001,0.01,0.1,1,10,100,1000]\nzro = []\nfor x in li:\n l1lr = LogisticRegression(C=x,penalty='l1')\n l1lr.fit(X_train,y_train)\n zro.append(np.count_nonzero(l1lr.coef_))\npd.DataFrame({'C':li,'No of non-zero coeff':zro})", "_____no_output_____" ], [ "li = [10**-6,10**-5,10**-4,0.001,0.01,0.1,1,10,100,1000,10000]\nzrol1 = []\nzrol2 = []\nfor x in li:\n l1lr = LogisticRegression(C=x,penalty='l1', tol=0.01 ,solver='saga')\n l2lr = LogisticRegression(C=x,penalty='l2', tol=0.01, solver='saga')\n l1lr.fit(X_train,y_train)\n l2lr.fit(X_train,y_train)\n zrol1.append(np.count_nonzero(l1lr.coef_))\n zrol2.append(np.count_nonzero(l2lr.coef_))\nlmbda = [1/x for x in li]\npd.DataFrame({'Lambda':lmbda,'No of non-zero coeff L1':zrol1,'And L2':zrol2})", "_____no_output_____" ] ], [ [ "> so as lambda increases more and more of coeff tends to zero", "_____no_output_____" ] ], [ [ " ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
c52d12905adc71681319c3b5ecf5e5e2a0a4235a
178972
ipynb
Jupyter Notebook
ex3.ipynb
ShisheerKaushik24/IBM-Quantum-challenge-2021-
45e69b5c42e21758f4295bc2d56f116bacfb5aad
[ "Apache-2.0" ]
null
null
null
ex3.ipynb
ShisheerKaushik24/IBM-Quantum-challenge-2021-
45e69b5c42e21758f4295bc2d56f116bacfb5aad
[ "Apache-2.0" ]
null
null
null
ex3.ipynb
ShisheerKaushik24/IBM-Quantum-challenge-2021-
45e69b5c42e21758f4295bc2d56f116bacfb5aad
[ "Apache-2.0" ]
null
null
null
186.235172
68860
0.899018
[ [ [ "# Exercise 3 - Quantum error correction\n\n## Historical background\n\nShor's algorithm gave quantum computers a worthwhile use case—but the inherent noisiness of quantum mechanics meant that building hardware capable of running such an algorithm would be a huge struggle. In 1995, Shor released another landmark paper: a scheme that shared quantum information over multiple qubits in order to reduce errors.[1]\n\nA great deal of progress has been made over the decades since. New forms of error correcting codes have been discovered, and a large theoretical framework has been built around them. The surface codes proposed by Kitaev in 1997 have emerged as the leading candidate, and many variations on the original design have emerged since then. But there is still a lot of progress to make in tailoring codes to the specific details of quantum hardware.[2]\n\nIn this exercise we'll consider a case in which artificial 'errors' are inserted into a circuit. Your task is to design the circuit such that these additional gates can be identified.\n\nYou'll then need to think about how to implement your circuit on a real device. This means you'll need to tailor your solution to the layout of the qubits. Your solution will be scored on how few entangling gates (the noisiest type of gate) that you use.\n\n### References\n1. Shor, Peter W. \"Scheme for reducing decoherence in quantum computer memory.\" Physical review A 52.4 (1995): R2493.\n1. Dennis, Eric, et al. \"Topological quantum memory.\" Journal of Mathematical Physics 43.9 (2002): 4452-4505.", "_____no_output_____" ], [ "## The problem of errors\n\nErrors occur when some spurious operation acts on our qubits. Their effects cause things to go wrong in our circuits. The strange results you may have seen when running on real devices is all due to these errors.\n\nThere are many spurious operations that can occur, but it turns out that we can pretend that there are only two types of error: bit flips and phase flips.\n\nBit flips have the same effect as the `x` gate. They flip the $|0\\rangle$ state of a single qubit to $|1\\rangle$ and vice-versa. Phase flips have the same effect as the `z` gate, introducing a phase of $-1$ into superpositions. Put simply, they flip the $|+\\rangle$ state of a single qubit to $|-\\rangle$ and vice-versa.\n\nThe reason we can think of any error in terms of just these two is because any error can be represented by some matrix, and any matrix can be written in terms of the matrices $X$ and $Z$. Specifically, for any single qubit matrix $M$,\n\n$$\nM = \\alpha I + \\beta X + \\gamma XZ + \\delta Z,\n$$\n\nfor some suitably chosen values $\\alpha$, $\\beta$, $\\gamma$ and $\\delta$.\n\nSo whenever we apply this matrix to some single qubit state $|\\psi\\rangle$ we get\n\n$$\nM |\\psi\\rangle = \\alpha |\\psi\\rangle + \\beta X |\\psi\\rangle + \\gamma XZ |\\psi\\rangle + \\delta Z |\\psi\\rangle.\n$$\n\nThe resulting superposition is composed of the original state, the state we'd have if the error was just a bit flip, the state for just a phase flip and the state for both. If we had some way to measure whether a bit or phase flip happened, the state would then collapse to just one possibility. And our complex error would become just a simple bit or phase flip.\n\nSo how do we detect whether we have a bit flip or a phase flip (or both). And what do we do about it once we know? 
Answering these questions is what quantum error correction is all about.\n\n\n", "_____no_output_____" ], [ "## An overly simple example\n\nOne of the first quantum circuits that most people ever write is to create a pair of entangled qubits. In this journey into quantum error correction, we'll start the same way.", "_____no_output_____" ] ], [ [ "from qiskit import QuantumCircuit, Aer\n\n# Make an entangled pair\nqc_init = QuantumCircuit(2)\nqc_init.h(0)\nqc_init.cx(0,1)\n\n# Draw the circuit\ndisplay(qc_init.draw('mpl'))\n\n# Get an output\nqc = qc_init.copy()\nqc.measure_all()\njob = Aer.get_backend('qasm_simulator').run(qc)\njob.result().get_counts()", "_____no_output_____" ] ], [ [ "Here we see the expected result when we run the circuit: the results `00` and `11` occurring with equal probability.\n\nBut what happens when we have the same circuit, but with a bit flip 'error' inserted manually.", "_____no_output_____" ] ], [ [ "# Make bit flip error\nqc_insert = QuantumCircuit(2)\nqc_insert.x(0)\n\n# Add it to our original circuit\nqc = qc_init.copy()\nqc = qc.compose(qc_insert)\n\n# Draw the circuit\ndisplay(qc.draw('mpl'))\n\n# Get an output\nqc.measure_all()\njob = Aer.get_backend('qasm_simulator').run(qc)\njob.result().get_counts()", "_____no_output_____" ] ], [ [ "Now the results are different: `01` and `10`. The two bit values have gone from always agreeing to always disagreeing. In this way, we detect the effect of the error.\n\nAnother way we can detect it is to undo the entanglement with a few more gates. If there are no errors, we return to the initial $|00\\rangle$ state.", "_____no_output_____" ] ], [ [ "# Undo entanglement\nqc_syn = QuantumCircuit(2)\nqc_syn.cx(0,1)\nqc_syn.h(0)\n\n# Add this after the error\nqc = qc_init.copy()\nqc = qc.compose(qc_syn)\n\n# Draw the circuit\ndisplay(qc.draw('mpl'))\n\n# Get an output\nqc.measure_all()\njob = Aer.get_backend('qasm_simulator').run(qc)\njob.result().get_counts()", "_____no_output_____" ] ], [ [ "But what happens if there are errors one of the qubits? Try inserting different errors to find out.\n\nHere's a circuit with all the components we've introduced so far: the initialization `qc_init`, the inserted error in `qc_insert` and the final `qc_syn` which ensures that the final measurement gives a nice definite answer.", "_____no_output_____" ] ], [ [ "# Define an error\nqc_insert = QuantumCircuit(2)\nqc_insert.x(0)\n\n# Undo entanglement\nqc_syn = QuantumCircuit(2)\nqc_syn.cx(0,1)\nqc_syn.h(0)\n\n# Add this after the error\nqc = qc_init.copy()\nqc = qc.compose(qc_insert)\nqc = qc.compose(qc_syn)\n\n# Draw the circuit\ndisplay(qc.draw('mpl'))\n\n# Get an output\nqc.measure_all()\njob = Aer.get_backend('qasm_simulator').run(qc)\njob.result().get_counts()", "_____no_output_____" ] ], [ [ "You'll find that the output tells us exactly what is going on with the errors. Both the bit and phase flips can be detected. The bit value on the left is `1` only if there is a bit flip (and so if we have inserted an `x(0)` or `x(1)`). The bit on the right similarly tells us there is a phase flip (an inserted `z(0)` or `z(1)`).\n\nThis ability to detect and distinguish bit and phase flips is very useful. But it is not quite useful enough. We can only tell *what type* of errors are happening, but not *where*. Without more detail, it is not possible to figure out how to remove the effects of these operations from our computations. For quantum error correction we therefore need something bigger and better.\n\nIt's your task to do just that! 
Here's a list of what you need to submit. Everything here is then explained by the example that follows.", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-success\">\n\n<b>Goal</b>\n\nCreate circuits which can detect `x` and `z` errors on two qubits.\nYou can come up with a solution of your own. Or just tweak the almost valid solution given below.\n\n</div>\n\n<div class=\"alert alert-block alert-danger\">\n<b>What to submit</b> \n\n* You need to supply two circuits:\n * `qc_init`: Prepares the qubits (of which there are at least two) in a desired initial state;\n * `qc_syn`: Measures a subset of the qubits.\n\n* The artificial errors to be inserted are `x` and `z` gates on two particular qubits. You need to pick the two qubits to be used for this (supplied as the list `error_qubits`).\n\n* There are 16 possible sets of errors to be inserted (including the trivial case of no errors). The measurement result of `qc_syn` should output a unique bit string for each. The grader will return the error message *'Please make sure the circuit is created to the initial layout.'* if this is not satisfied.\n\n* The grader will compile the complete circuit for the backend `ibmq_tokyo` (a retired device). To show that your solution is tailor made for the device, this transpilation should not change the number of `cx` gates. If it does, you will get the error message *'Please make sure the circuit is created to the initial layout.'*\n \n* To guide the transpilation, you'll need to tell the transpiler which qubits on the device should be used as which qubits in your circuit. This is done with an `initial_layout` list.\n \n* You may start with the example given below, which can become a valid answer with a few tweaks.\n</div>", "_____no_output_____" ], [ "## A better example: the surface code", "_____no_output_____" ] ], [ [ "from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, Aer, transpile\n\nimport qiskit.tools.jupyter\nfrom qiskit.test.mock import FakeTokyo", "_____no_output_____" ], [ "code = QuantumRegister(5,'code')", "_____no_output_____" ] ], [ [ "In this example we'll use 5 qubits that we'll call code qubits. To keep track of them, we'll define a special quantum register.", "_____no_output_____" ], [ "We'll also have an additional four qubits we'll call syndrome qubits.", "_____no_output_____" ] ], [ [ "syn = QuantumRegister(4,'syn')", "_____no_output_____" ] ], [ [ "Similarly we define a register for the four output bits, used when measuring the syndrome qubits.", "_____no_output_____" ] ], [ [ "out = ClassicalRegister(4,'output')", "_____no_output_____" ] ], [ [ "We consider the qubits to be laid out as follows, with the code qubits forming the corners of four triangles, and the syndrome qubits living inside each triangle.\n\n```\nc0----------c1\n| \\ s0 / |\n| \\ / |\n| s1 c2 s2 |\n| / \\ |\n| / s3 \\ |\nc3----------c4\n```\n\nFor each triangle we associate a stabilizer operation on its three qubits. For the qubits on the sides, the stabilizers are ZZZ. For the top and bottom ones, they are XXX.\n\nThe syndrome measurement circuit corresponds to a measurement of these observables. This is done in a similar way to surface code stabilizers (in fact, this code is a small version of a surface code).\n<div class=\"alert alert-block alert-danger\">\n \n<b>Warning</b> \n\nYou should remove the barriers before submitting the code as it might interfere with transpilation. It is given here for visualization only. 
\n</div>", "_____no_output_____" ], [ "The initialization circuit prepares an eigenstate of these observables, such that the output of the syndrome measurement will be `0000` with certainty.", "_____no_output_____" ] ], [ [ "qc_syn = QuantumCircuit(code,syn,out)\n\n\n# Left ZZZ\nqc_syn.cx(code[0],syn[1])\nqc_syn.cx(code[2],syn[1])\nqc_syn.cx(code[3],syn[1])\n#qc_syn.barrier()\n\n# Right ZZZ\nqc_syn.swap(code[1],syn[0])\nqc_syn.cx(syn[0],syn[2])\nqc_syn.swap(code[1],syn[0])\nqc_syn.cx(code[2],syn[2])\n#qc_syn.swap(code[1],syn[2])\n#qc_syn.swap(code[4],syn[2])\nqc_syn.cx(code[4],syn[2])\n#qc_syn.barrier()\n\n# Top XXX\nqc_syn.h(syn[0])\n#qc_syn.swap(syn[2],code[1])\nqc_syn.cx(syn[0],code[0])\nqc_syn.cx(syn[0],code[1])\n#qc_syn.swap(syn[2],code[4])\nqc_syn.cx(syn[0],code[2])\nqc_syn.h(syn[0])\n#qc_syn.barrier()\n\n# Bottom XXX\nqc_syn.h(syn[3])\nqc_syn.cx(syn[3],code[2])\nqc_syn.cx(syn[3],code[3])\nqc_syn.cx(syn[3],code[4])\nqc_syn.h(syn[3])\n#qc_syn.barrier()\n\n\n# Measure the auxilliary qubits\nqc_syn.measure(syn,out)\nqc_syn.draw('mpl')", "_____no_output_____" ], [ "qc_init = QuantumCircuit(code,syn,out)\n\nqc_init.h(syn[0])\n\nqc_init.cx(syn[0],code[0])\n#qc_init.swap(syn[0],code[1])\nqc_init.cx(syn[0],code[1])\nqc_init.cx(syn[0],code[2])\n#qc_init.swap(syn[2],code[1])\nqc_init.cx(code[2],syn[0])\n\nqc_init.h(syn[3])\nqc_init.cx(syn[3],code[2])\nqc_init.cx(syn[3],code[3])\nqc_init.cx(syn[3],code[4])\nqc_init.cx(code[4],syn[3])\n\n#qc_init.barrier()\nqc_init.draw('mpl')", "_____no_output_____" ] ], [ [ "Let's check that is true.", "_____no_output_____" ] ], [ [ "qc = qc_init.compose(qc_syn)\ndisplay(qc.draw('mpl'))\n\njob = Aer.get_backend('qasm_simulator').run(qc)\njob.result().get_counts()", "_____no_output_____" ] ], [ [ "Now let's make a circuit with which we can insert `x` and `z` gates on our two code qubits. For this we'll need to choose which of the 5 code qubits we have will correspond to the two required for the validity condition.\n\nFor this code we need to choose opposite corners.", "_____no_output_____" ] ], [ [ "error_qubits = [0,4]", "_____no_output_____" ] ], [ [ "Here 0 and 4 refer to the positions of the qubits in the following list, and hence are qubits `code[0]` and `code[4]`.", "_____no_output_____" ] ], [ [ "qc.qubits", "_____no_output_____" ] ], [ [ "To check that the code does as we require, we can use the following function to create circuits for inserting artificial errors. 
Here the errors we want to add are listed in `errors` as a simple text string, such as `x0` for an `x` on `error_qubits[0]`.", "_____no_output_____" ] ], [ [ "def insert(errors,error_qubits,code,syn,out):\n\n qc_insert = QuantumCircuit(code,syn,out)\n\n if 'x0' in errors:\n qc_insert.x(error_qubits[0])\n if 'x1' in errors:\n qc_insert.x(error_qubits[1])\n if 'z0' in errors:\n qc_insert.z(error_qubits[0])\n if 'z1' in errors:\n qc_insert.z(error_qubits[1])\n \n return qc_insert", "_____no_output_____" ] ], [ [ "Rather than all 16 possibilities, let's just look at the four cases where a single error is inserted.", "_____no_output_____" ] ], [ [ "for error in ['x0','x1','z0','z1']:\n \n qc = qc_init.compose(insert([error],error_qubits,code,syn,out)).compose(qc_syn)\n job = Aer.get_backend('qasm_simulator').run(qc)\n \n print('\\nFor error '+error+':')\n counts = job.result().get_counts()\n for output in counts:\n print('Output was',output,'for',counts[output],'shots.')", "\nFor error x0:\nOutput was 0010 for 1024 shots.\n\nFor error x1:\nOutput was 0100 for 1024 shots.\n\nFor error z0:\nOutput was 0001 for 1024 shots.\n\nFor error z1:\nOutput was 1000 for 1024 shots.\n" ] ], [ [ "Here we see that each bit in the output is `1` when a particular error occurs: the leftmost detects `z` on `error_qubits[1]`, then the next detects `x` on `error_qubits[1]`, and so on.\n\n<div class=\"alert alert-block alert-danger\">\n \n<b>Attention</b> \n\nThe correct ordering of the output is important for this exercise. Please follow the order as given below:\n1. The leftmost output represents `z` on `code[1]`.\n2. The second output from left represents `x` on `code[1]`.\n3. The third output from left represents `x` on `code[0]`.\n4. The rightmost output represents `z` on `code[0]`.\n \n</div>\n\nWhen more errors affect the circuit, it becomes hard to unambiguously tell which errors occurred. However, by continuously repeating the syndrome readout to get more results and analysing the data through the process of decoding, it is still possible to determine enough about the errors to correct their effects.\n\nThese kinds of considerations are beyond what we will look at in this challenge. Instead we'll focus on something simpler, but just as important: the fewer errors you have, and the simpler they are, the better your error correction will be. To ensure this, your error correction procedure should be tailor-made to the device you are using.\n\nIn this challenge we'll be considering the device `ibmq_tokyo`. Though the real version of this was retired some time ago, it still lives on as one of the mock backends.", "_____no_output_____" ] ], [ [ "# Please use the backend given here\nbackend = FakeTokyo()\nbackend", "_____no_output_____" ] ], [ [ "As a simple idea of how our original circuit is laid out, let's see how many two-qubit gates it contains.", "_____no_output_____" ], [ "If we were to transpile it to the `ibmq_tokyo` backend, remapping would need to occur at the cost of adding for two-qubit gates.", "_____no_output_____" ] ], [ [ "qc1 = transpile(qc,backend,basis_gates=['u','cx'], optimization_level=3)\nqc1.num_nonlocal_gates()", "_____no_output_____" ], [ "qc = qc_init.compose(qc_syn)\nqc = transpile(qc, basis_gates=['u','cx'])\nqc.num_nonlocal_gates()", "_____no_output_____" ] ], [ [ "We can control this to an extent by looking at which qubits on the device would be best to use as the qubits in the code. 
If we look at what qubits in the code need to be connected by two-qubit gates in `qc_syn`, we find the following required connectivity graph.\n\n```\nc0....s0....c1\n: : : \n: : :\ns1....c2....s2\n: : :\n: : :\nc3....s3....c4\n```\n\nNo set of qubits on `ibmq_tokyo` can provide this, but certain sets like 0,1,2,5,6,7,10,11,12 come close. So we can set an `initial_layout` to tell the transpiler to use these.\n\n", "_____no_output_____" ] ], [ [ "initial_layout = [0,2,6,10,12,1,5,7,11]", "_____no_output_____" ] ], [ [ "These tell the transpiler which qubits on the device to use for the qubits in the circuit (for the order they are listed in `qc.qubits`). So the first five entries in this list tell the circuit which qubits to use as the code qubits and the next four entries in this list are similarly for the syndrome qubits. So we use qubit 0 on the device as `code[0]`, qubit 2 as `code[1]` and so on.\n\nNow let's use this for the transpilation.", "_____no_output_____" ] ], [ [ "qc2 = transpile(qc,backend,initial_layout=initial_layout, basis_gates=['u','cx'], optimization_level=3)\nqc2.num_nonlocal_gates()", "_____no_output_____" ] ], [ [ "Though transpilation is a random process, you should typically find that this uses less two-qubit gates than when no initial layout is provided (you might need to re-run both transpilation code multiple times to see it as transpilation is a random process).\n\nNevertheless, a properly designed error correction scheme should not need any remapping at all. It should be written for the exact device used, and the number of two-qubit gates should remain constant with certainty. This is a condition for a solution to be valid. So you'll not just need to provide an `initial_layout`, but also design your circuits specifically for that layout.\n\nBut that part we leave up to you!", "_____no_output_____" ] ], [ [ "# Check your answer using following code\nfrom qc_grader import grade_ex3\ngrade_ex3(qc_init,qc_syn,error_qubits,initial_layout)", "Grading your answer for ex3. Please wait...\n\nCongratulations 🎉! Your answer is correct.\nYour cost is 266.\nFeel free to submit your answer.\n\n" ], [ "# Submit your answer. You can re-submit at any time.\nfrom qc_grader import submit_ex3\nsubmit_ex3(qc_init,qc_syn,error_qubits,initial_layout)", "Submitting your answer for ex3. Please wait...\nSuccess 🎉! Your answer has been submitted.\n" ] ], [ [ "## Additional information\n\n**Created by:** James Wootton, Rahul Pratap Singh\n\n**Version:** 1.0.0", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
c52d1cd9c3617ac890b4744925fad1b1ef9b3c8f
5,828
ipynb
Jupyter Notebook
notebooks/deep_learning/nn_byod_simple_fastai.ipynb
dexX7/codecentric.AI-bootcamp
b6edbf6b450718bb001aad941266daf3c17ed84e
[ "MIT" ]
22
2018-11-30T14:41:25.000Z
2022-03-09T13:02:43.000Z
notebooks/deep_learning/nn_byod_simple_fastai.ipynb
dexX7/codecentric.AI-bootcamp
b6edbf6b450718bb001aad941266daf3c17ed84e
[ "MIT" ]
6
2020-03-24T16:59:31.000Z
2022-03-11T23:45:30.000Z
notebooks/deep_learning/nn_byod_simple_fastai.ipynb
dexX7/codecentric.AI-bootcamp
b6edbf6b450718bb001aad941266daf3c17ed84e
[ "MIT" ]
10
2018-11-30T13:21:01.000Z
2022-03-09T13:02:44.000Z
25.561404
283
0.579787
[ [ [ "# Simple Image Classifier - Bring Your Own Data\n\n## Neuronale Netze auf https://bootcamp.codecentric.ai\n\nJetzt wird es Zeit, mit einem eigenen Dataset zu experimentieren.\n\nHinweis: Wenn du auf einem Rechner trainierst, wo keine gut GPU verfügbar ist, kann dies sehr lange dauern. Evtl. möchtest du in dem Fall das Kapitel zu \"Training in der Cloud\" vorziehen und das Experiment dort durchführen.\n\n\n\nImports und Settings", "_____no_output_____" ] ], [ [ "from fastai.basics import *\nfrom fastai.vision import *", "_____no_output_____" ] ], [ [ "### Ordner festlegen, wo Daten liegen \n\nÜberlege dir, welche Bilder du klassifizieren möchtest. \nWenn du dich zum Beispiel für Vogel vs. Turnschuh entscheidest, lege eine Ordnerstruktur an - z.B.:\n\n- /data/byod/train/\n - vogel/bild1.jpg\n - vogel/bild2.jpg\n - vogel/...\n - turnschuh/bild1.jpg\n - turnschuh/...\n \nDie Namen der Ordner sind wichtig - das sind deine Label. Die Namen der Bilder sind egal (es müssen auch nicht nur jpg sein).\n\nDie Bilder werden anhand der Ordner \"gelabelt\".\n\nWieviele Bilder braucht man dafür? Fang doch einfach mal mit 10-20 Bildern pro Kategorie an und probiere es aus ... Vllt. findest du auch eine Möglichkeit \"automatisiert\" mehrere Bilder herunter zu laden. \n\nOft ist es ein großer Aufwand erstmal genügend Daten in der entsprechenden Qualität zu bekommen.", "_____no_output_____" ] ], [ [ "DATA = \"/data/byod/\"\nTRAIN = DATA + \"train/\"", "_____no_output_____" ] ], [ [ "Der folgende Befehl macht:\n\n* Daten aus Ordner laden (bzw. einen Loader definieren)\n* Labels aus Ordner Namen zuordnen (alle Bilder im Ordner Kiwi sind Kiwis)\n* Split Train/Valid (20 %)\n* Bilder verkleinern (wenn du nur auf CPU trainierst wähle eine kleine Size, sonst dauert das Training sehr lang)\n* (und einiges mehr)", "_____no_output_____" ] ], [ [ "data = ImageDataBunch.from_folder(TRAIN, valid_pct=0.2, size=200, bs=20)", "_____no_output_____" ] ], [ [ "Wie sehen unsere Daten aus? Einfach mal ein paar Beispiele der Trainigsdaten anzeigen:", "_____no_output_____" ] ], [ [ "data.show_batch(rows=3, figsize=(6, 6))", "_____no_output_____" ] ], [ [ "Der folgende Befehl macht:\n\n* Erzeuge ein CNN\n* mit einer Standard Architektur (vortrainiertes ResNet)\n* Architektur wird automatisch auf neue Daten angepasst (Bildgrößen, Klassen, etc.)\n* gebe im Trainingsloop die Metrik \"Accuracy\" aus\n* unter der Haube werden viele Standard-Werte gesetzt (welcher Optimizer, Hyperparameter, Best Practices, ...)", "_____no_output_____" ] ], [ [ "learn = cnn_learner(data, models.resnet18, metrics=accuracy)", "_____no_output_____" ] ], [ [ "### Start Training", "_____no_output_____" ] ], [ [ "learn.fit(1)", "_____no_output_____" ] ], [ [ "### Jetzt mit dem trainierten Modell eine Vorhersage machen\n\nWenn du ein paar Bilder testen möchtest, dann lege unter /data/byod/ einen test Ordner an und kopiere ein paar Bilder hinein (Bilder, die nicht beim Training verwendet wurden). 
Hierbei musst du keine Unterordner anlegen (das Modell soll ja vorhersagen, welche Klasse es ist)\n\nJetzt nehmen wir ein random Bild aus dem Test Ordner:", "_____no_output_____" ] ], [ [ "TEST = DATA + \"test/\"\nTEST_IMAGES = os.listdir(TEST)\nTEST_IMAGES", "_____no_output_____" ], [ "test_img = open_image(TEST + random.choice(TEST_IMAGES))\ntest_img", "_____no_output_____" ] ], [ [ "und machen eine prediction mit dem Modell:", "_____no_output_____" ] ], [ [ "learn.predict(test_img)", "_____no_output_____" ] ], [ [ "## Credits\n\nFür die Übung verwenden wir die fast.ai Library - siehe http://fast.ai", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
c52d1d3ccb3e072f6dea8300d34583ca65fcd4a1
7,405
ipynb
Jupyter Notebook
Docking/OEdocking/OEdocking-files/OEdock.ipynb
MobleyLab/T4L-temperature-effects
807b88cef5b8d2d1d318519fd5e2359428d9e424
[ "MIT" ]
null
null
null
Docking/OEdocking/OEdocking-files/OEdock.ipynb
MobleyLab/T4L-temperature-effects
807b88cef5b8d2d1d318519fd5e2359428d9e424
[ "MIT" ]
null
null
null
Docking/OEdocking/OEdocking-files/OEdock.ipynb
MobleyLab/T4L-temperature-effects
807b88cef5b8d2d1d318519fd5e2359428d9e424
[ "MIT" ]
null
null
null
30.599174
124
0.55476
[ [ [ "from openeye import oedocking\nfrom openeye import oeomega\nfrom openeye import oechem", "_____no_output_____" ], [ "# Load the T4 receptor; the files of the different receptors used in this study can be found in:\n# T4L-temperature-effects/Docking/OEdock/binders-non-binders/files-to-prepare-receptors/cryo-closed-raw \nimstr = oemolistream('receptor.pdb')\nprotein = oechem.OEGraphMol()\noechem.OEReadMolecule(imstr, protein)\n#imstr.close()\n\n# Load a reference ligand to specify the binding site\nligand = oechem.OEGraphMol()\nimstr = oechem.oemolistream('toluene_oe.mol2')\noechem.OEReadMolecule(imstr, ligand)\nimstr.close()\n\n# Initialize the receptor for docking\nreceptor = oechem.OEGraphMol()\noedocking.OEMakeReceptor(receptor, protein, ligand)", "_____no_output_____" ], [ "# Set the docking method and resolution \ndock_method = oedocking.OEDockMethod_Chemscore\ndock_resolution = oedocking.OESearchResolution_Default\nsdtag = oedocking.OEDockMethodGetName( dock_method )\n\n# OEDocking object\ndock = oedocking.OEDock( dock_method, dock_resolution)\n\nif not dock.Initialize(receptor):\n raise Exception(\"Unable to initialize Docking with {0}\".format(self.args.receptor))", "_____no_output_____" ], [ "def dock_molecule( dock: \"OEDock\", sdtag: str, num_poses: int, mcmol ) -> tuple:\n ''' Docks the multiconfomer molecule, with the given number of poses\n Returns a tuple of the docked molecule (dockedMol) and its score\n i.e. ( dockedMol, score )\n '''\n dockedMol = oechem.OEMol()\n\n # Dock the molecule\n res = dock.DockMultiConformerMolecule(dockedMol, mcmol, num_poses)\n \n if res == oedocking.OEDockingReturnCode_Success:\n \n # Label the molecule with the score and SDTag\n oedocking.OESetSDScore(dockedMol, dock, sdtag)\n dock.AnnotatePose(dockedMol)\n score = dock.ScoreLigand(dockedMol)\n oechem.OESetSDData(dockedMol, sdtag, \"{}\".format(score))\n return dockedMol, score\n \n else:\n # raise an exception if the docking is not successful\n raise Exception(\"Unable to dock ligand {0} to receptor\".format( dockedMol ))", "_____no_output_____" ], [ "# Decoys created via DUD-E\ndecoys = open(\"decoys.smi\").read().splitlines()\n\n# Zinc list - described as binders\nzinc = open(\"zinc.smi\").read().splitlines()\n\n# Experimentally validated active compounds - Mobley work + Minh et al \nactives = open(\"mobley-minh-actives.smi\").read().splitlines()\n\n\n#combine all the compounds in one list\nall_compounds = decoys + zinc + actives", "_____no_output_____" ], [ "# save list to file \nwith open('all_compounds.smi', 'w') as f:\n for item in all_compounds:\n f.write(\"%s\\n\" % item)", "_____no_output_____" ], [ "omega = oeomega.OEOmega()\nomega.SetStrictStereo(False) \n\n# Generate conformers then dock\ninmols = []\nusednames = []\nfor idx,line in enumerate(all_compounds):\n tmp = line.split()\n smi = tmp[0]\n mol = oechem.OEMol()\n name = tmp[1]\n if name=='' or name==None or len(name)<3:\n #Define alternate name based on index\n name = 'mol%s smiles %s' % (idx, smi)\n print(\"No name found on line %s; using alternate name %s...\" % (idx, name))\n if not name in usednames: \n usednames.append(name)\n oechem.OEParseSmiles(mol, smi)\n mol.SetTitle(name)\n builtOK = omega(mol)\n inmols.append(mol)\n else:\n continue\n\n# Define how many docked poses to generate per molecule\nnum_poses = 2\n\n\n# Open a filestream to write the docked poses\nscores = {}\nwith oechem.oemolostream( 'dock-results-Chemscore.sdf') as ofs:\n\n # Loop over 3D molecules from the input filestream\n for mcmol in 
inmols:\n\n # Call docking function\n dockedMol, score = dock_molecule( dock, sdtag, num_poses, mcmol )\n print(\"{} {} score = {:.4f}\".format(sdtag, dockedMol.GetTitle(), score))\n\n # Write docked molecules to output filestream\n oechem.OEWriteMolecule(ofs, dockedMol)\n \n # Store scores\n scores[ mcmol.GetTitle()] = score", "_____no_output_____" ], [ "active_smiles_by_name = {}\nfile = open('mobley-minh-actives', 'r') \ntext = file.readlines()\nfile.close()\nfor line in text:\n tmp = line.split()\n active_smiles_by_name[tmp[1]] = tmp[0]\n\n# Build list of titles sorted by score\nsorted_titles = list(scores.keys())\nsorted_titles.sort( key = lambda title: scores[title] )\n\n# Count how many actives are found at which ranks\nct = 0\nfnd_actives = []\nfor active_name in active_smiles_by_name.keys():\n if active_name in sorted_titles:\n ct += 1\n print(\"Active %s found in docking results at rank %s\" % ( active_name, sorted_titles.index(active_name)))\n fnd_actives.append( active_name )\n\nprint(\"Total compounds: %s\" % len(sorted_titles))\n\n#Find number of actives\nn_actives = len(fnd_actives)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c52d1da3dcc8568f642e67ea22ce8459ecad1589
86,708
ipynb
Jupyter Notebook
.ipynb_checkpoints/plotly_express-checkpoint.ipynb
hopezh/learn_plotly
8da7cfd0cd3c42c954eb9b869021cd32f5e007bf
[ "MIT" ]
null
null
null
.ipynb_checkpoints/plotly_express-checkpoint.ipynb
hopezh/learn_plotly
8da7cfd0cd3c42c954eb9b869021cd32f5e007bf
[ "MIT" ]
null
null
null
.ipynb_checkpoints/plotly_express-checkpoint.ipynb
hopezh/learn_plotly
8da7cfd0cd3c42c954eb9b869021cd32f5e007bf
[ "MIT" ]
null
null
null
21.451757
110
0.313881
[ [ [ "import plotly.express as px\n\ndf = px.data.gapminder()\n\ndf.head()", "_____no_output_____" ], [ "df2007 = df.query('year == 2007')\ndf2007.head()", "_____no_output_____" ], [ "px.histogram(df, x='lifeExp')", "_____no_output_____" ], [ "px.histogram(df2007, x='lifeExp')", "_____no_output_____" ], [ "px.histogram(df2007, x='lifeExp', color='continent')", "_____no_output_____" ], [ "px.histogram(\n df2007, \n x='lifeExp', \n color='continent', \n marginal='rug', \n hover_name='country', \n hover_data=df2007.columns\n)", "_____no_output_____" ], [ "px.histogram(\n df2007, \n x='lifeExp', \n color='continent', \n marginal='rug', \n hover_name='country', \n hover_data=df2007.columns,\n y='pop', \n histfunc='sum'\n)", "_____no_output_____" ], [ "px.histogram(\n df2007, \n x='continent', \n hover_name='country', \n hover_data=df2007.columns,\n y='pop', \n histfunc='sum'\n)", "_____no_output_____" ], [ "px.bar(\n df2007, \n x='continent', \n color='lifeExp', \n hover_name='country', \n hover_data=df2007.columns,\n y='pop', \n)", "_____no_output_____" ], [ "px.sunburst(\n df2007, \n path=['continent', 'country'],\n color='lifeExp', \n hover_data=df2007.columns,\n values='pop'\n)", "_____no_output_____" ], [ "px.choropleth(\n df2007, \n locations='iso_alpha', \n color='lifeExp', \n hover_data=df2007.columns\n)", "_____no_output_____" ], [ "px.scatter(\n df2007, \n x='gdpPercap', \n y='lifeExp', \n log_x=True,\n color='continent', \n size='pop', \n size_max=45, \n facet_col='continent', \n facet_col_wrap=3, \n hover_data=df2007.columns, \n hover_name='country'\n)", "_____no_output_____" ], [ "px.scatter(\n df, \n x='gdpPercap', \n y='lifeExp', \n log_x=True,\n color='continent', \n size='pop', \n size_max=45, \n animation_frame='year', \n animation_group='country', \n range_y=[20, 100], \n hover_data=df2007.columns, \n hover_name='country'\n)", "_____no_output_____" ], [ "import plotly.express as px\ndf = px.data.gapminder()\npx.scatter(df, x=\"gdpPercap\", y=\"lifeExp\", animation_frame=\"year\", animation_group=\"country\",\n size=\"pop\", color=\"continent\", hover_name=\"country\",\n log_x=True, size_max=55, range_x=[100,100000], range_y=[25,90])", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c52d3531778f41e06a1c30dac26bc6cdeef63ab6
2,912
ipynb
Jupyter Notebook
ml/Numpy tricks.ipynb
pgenevski/notebooks
186b0e41d1424cb33bb1dea8905c4aec3a7b4cc5
[ "MIT" ]
null
null
null
ml/Numpy tricks.ipynb
pgenevski/notebooks
186b0e41d1424cb33bb1dea8905c4aec3a7b4cc5
[ "MIT" ]
null
null
null
ml/Numpy tricks.ipynb
pgenevski/notebooks
186b0e41d1424cb33bb1dea8905c4aec3a7b4cc5
[ "MIT" ]
null
null
null
26
86
0.34375
[ [ [ "import numpy as np\n\nx = np.eye(200,200)", "_____no_output_____" ] ], [ [ "## Find values\nThis will find all values and return them as a 1D array", "_____no_output_____" ] ], [ [ "x[np.where(x==1)]", "_____no_output_____" ] ], [ [ "## Change values\nThis will change all ones to 6 and then all 6 to 7.", "_____no_output_____" ] ], [ [ "x[x==1] = 6\nx[np.where(x==6)] = 7\nx", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
c52d37ae1e0a829db30f203ed66abc7822cfeec0
3,946
ipynb
Jupyter Notebook
examples/Networks/Networks - Introduction.ipynb
BubbleStar/PredictCode
1c6a5544b1d9185a4547c54fddc630a3592da3ba
[ "Artistic-2.0" ]
1
2019-03-24T07:06:25.000Z
2019-03-24T07:06:25.000Z
examples/Networks/Networks - Introduction.ipynb
BubbleStar/PredictCode
1c6a5544b1d9185a4547c54fddc630a3592da3ba
[ "Artistic-2.0" ]
null
null
null
examples/Networks/Networks - Introduction.ipynb
BubbleStar/PredictCode
1c6a5544b1d9185a4547c54fddc630a3592da3ba
[ "Artistic-2.0" ]
null
null
null
44.337079
443
0.662696
[ [ [ "# Network based predictions\n\nThe state of the art in crime predication increasingly appears to be \"network based\". That is, looking at real street networks, and assigning risk to streets, rather than areal grid cells.\n\nThis is currently an introduction, a very brief literature review, and a plan of action.", "_____no_output_____" ], [ "# Literature review\n\nNot complete.\n\n1. Rosser at al. \"Predictive Crime Mapping: Arbitrary Grids or Street Networks?\" J Quant Criminol (2016). https://doi.org/10.1007/s10940-016-9321-x\n2. Shiode, Shiode, \"Microscale Prediction of Near-Future Crime Concentrations with Street-Level Geosurveillance\" Geographical Analysis (2014) 46 435–455 https://doi.org/10.1111/gean.12065\n3. Okabe et al. \"A kernel density estimation method for networks, its computational method and a GIS‐based tool\", \nInternational Journal of Geographical Information Science (2009) 23 https://doi.org/10.1080/13658810802475491\n\n\n\n\nSome comments:\n1. \"Prospective hotspotting on a network\". Algorithm is fairly clear. Some useful info on estimating parameters.\n2. Uses a space/time search window to form a statistical test of whether there is an \"outbreak\" of crime (so similar-ish to SatScan in some sense).\n3. Not about _crime_ as such, but the details on how to adapt KDE methods to a network are very detailed and will be useful.", "_____no_output_____" ], [ "# Action plan\n\n## Network data\n\n- Open street map. I started to look at off-line processing of data with this repository: https://github.com/MatthewDaws/OSMDigest\n - Might also consider [OSMNX](https://github.com/gboeing/osmnx)\n- [TIGER/Line shapefiles](https://www.census.gov/geo/maps-data/data/tiger-line.html) USA only, but a canonical source.\n- [Ordnance Survey](https://www.ordnancesurvey.co.uk/) is the canonical source of data in the UK.\n - I am very keen to use only [freely available](https://www.ordnancesurvey.co.uk/business-and-government/products/opendata-products.html) (potentially, as in Beer) data\n - I think the two appropriate products for us are:\n - [OS Open Roads](https://www.ordnancesurvey.co.uk/business-and-government/products/os-open-roads.html) This is vector data, and a single download.\n - [OS OpenMap Local](https://www.ordnancesurvey.co.uk/business-and-government/products/os-open-map-local.html) Available as vector and raster for each national grid reference square. (Or for all of the UK, but a massive download). The raster maps are very detailed. The vector data, for _roads_, seems no more complete than the \"Open Roads\" download. But does include _buildings_ in vector format (but no address level data).\n - [OS VectorMap](https://www.ordnancesurvey.co.uk/business-and-government/products/vectormap-district.html) Shape files are slightly less detailed than OpenMap; raster format is 1/2 the detail. Might want to support optionally.", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown" ] ]
c52d51c5fe19ffa5ff7fb1683c261f916364095c
23,494
ipynb
Jupyter Notebook
demo9/output/example_wasp14/run01/emcee_experiment.ipynb
onsB/starfish-demo
8a3daf1ba824d692a4bd32808d65b48997c2fe72
[ "MIT" ]
4
2015-12-08T14:53:55.000Z
2021-02-07T14:16:20.000Z
demo9/output/example_wasp14/run01/emcee_experiment.ipynb
onsB/starfish-demo
8a3daf1ba824d692a4bd32808d65b48997c2fe72
[ "MIT" ]
null
null
null
demo9/output/example_wasp14/run01/emcee_experiment.ipynb
onsB/starfish-demo
8a3daf1ba824d692a4bd32808d65b48997c2fe72
[ "MIT" ]
1
2021-02-07T14:25:16.000Z
2021-02-07T14:25:16.000Z
33.230552
186
0.520814
[ [ [ "Experimenting with my hack `star_so.py`", "_____no_output_____" ] ], [ [ "#!/usr/bin/env python\n\n# All of the argument parsing is done in the `parallel.py` module.\n\nimport numpy as np\nimport Starfish\nfrom Starfish.model import ThetaParam, PhiParam\n\n\n#import argparse\n#parser = argparse.ArgumentParser(prog=\"star_so.py\", description=\"Run Starfish fitting model in single order mode with many walkers.\")\n#parser.add_argument(\"--sample\", choices=[\"ThetaCheb\", \"ThetaPhi\", \"ThetaPhiLines\"], help=\"Sample the all stellar and nuisance parameters at the same time.\")\n#parser.add_argument(\"--samples\", type=int, default=5, help=\"How many samples to run?\")\n#parser.add_argument(\"--incremental_save\", type=int, default=0, help=\"How often to save incremental progress of MCMC samples.\")\n#parser.add_argument(\"--use_cov\", action=\"store_true\", help=\"Use the local optimal jump matrix if present.\")\n#args = parser.parse_args()\n\nimport os\n\nimport Starfish.grid_tools\nfrom Starfish.samplers import StateSampler\nfrom Starfish.spectrum import DataSpectrum, Mask, ChebyshevSpectrum\nfrom Starfish.emulator import Emulator\nimport Starfish.constants as C\nfrom Starfish.covariance import get_dense_C, make_k_func, make_k_func_region\n\nfrom scipy.special import j1\nfrom scipy.interpolate import InterpolatedUnivariateSpline\nfrom scipy.linalg import cho_factor, cho_solve\nfrom numpy.linalg import slogdet\nfrom astropy.stats import sigma_clip\n\nimport gc\nimport logging\n\nfrom itertools import chain\nfrom collections import deque\nfrom operator import itemgetter\nimport yaml\nimport shutil\nimport json\n\n\n\nStarfish.routdir = \"\"\n\n# list of keys from 0 to (norders - 1)\norder_keys = np.arange(1)\nDataSpectra = [DataSpectrum.open(os.path.expandvars(file), orders=Starfish.data[\"orders\"]) for file in Starfish.data[\"files\"]]\n# list of keys from 0 to (nspectra - 1) Used for indexing purposes.\nspectra_keys = np.arange(len(DataSpectra))\n\n#Instruments are provided as one per dataset\nInstruments = [eval(\"Starfish.grid_tools.\" + inst)() for inst in Starfish.data[\"instruments\"]]\n\n\nlogging.basicConfig(format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\", filename=\"{}log.log\".format(\n Starfish.routdir), level=logging.DEBUG, filemode=\"w\", datefmt='%m/%d/%Y %I:%M:%S %p')\n\nclass Order:\n def __init__(self, debug=False):\n '''\n This object contains all of the variables necessary for the partial\n lnprob calculation for one echelle order. It is designed to first be\n instantiated within the main processes and then forked to other\n subprocesses. 
Once operating in the subprocess, the variables specific\n to the order are loaded with an `INIT` message call, which tells which key\n to initialize on in the `self.initialize()`.\n '''\n self.lnprob = -np.inf\n self.lnprob_last = -np.inf\n\n self.debug = debug\n\n def initialize(self, key):\n '''\n Initialize to the correct chunk of data (echelle order).\n\n :param key: (spectrum_id, order_key)\n :param type: (int, int)\n\n This method should only be called after all subprocess have been forked.\n '''\n\n self.id = key\n spectrum_id, self.order_key = self.id\n # Make sure these are ints\n self.spectrum_id = int(spectrum_id)\n\n self.instrument = Instruments[self.spectrum_id]\n self.dataSpectrum = DataSpectra[self.spectrum_id]\n self.wl = self.dataSpectrum.wls[self.order_key]\n self.fl = self.dataSpectrum.fls[self.order_key]\n self.sigma = self.dataSpectrum.sigmas[self.order_key]\n self.ndata = len(self.wl)\n self.mask = self.dataSpectrum.masks[self.order_key]\n self.order = int(self.dataSpectrum.orders[self.order_key])\n\n self.logger = logging.getLogger(\"{} {}\".format(self.__class__.__name__, self.order))\n if self.debug:\n self.logger.setLevel(logging.DEBUG)\n else:\n self.logger.setLevel(logging.INFO)\n\n self.logger.info(\"Initializing model on Spectrum {}, order {}.\".format(self.spectrum_id, self.order_key))\n\n self.npoly = Starfish.config[\"cheb_degree\"]\n self.chebyshevSpectrum = ChebyshevSpectrum(self.dataSpectrum, self.order_key, npoly=self.npoly)\n\n # If the file exists, optionally initiliaze to the chebyshev values\n fname = Starfish.specfmt.format(self.spectrum_id, self.order) + \"phi.json\"\n if os.path.exists(fname):\n self.logger.debug(\"Loading stored Chebyshev parameters.\")\n phi = PhiParam.load(fname)\n self.chebyshevSpectrum.update(phi.cheb)\n\n self.resid_deque = deque(maxlen=500) #Deque that stores the last residual spectra, for averaging\n self.counter = 0\n\n self.emulator = Emulator.open()\n self.emulator.determine_chunk_log(self.wl)\n\n self.pca = self.emulator.pca\n\n self.wl_FFT = self.pca.wl\n\n # The raw eigenspectra and mean flux components\n self.EIGENSPECTRA = np.vstack((self.pca.flux_mean[np.newaxis,:], self.pca.flux_std[np.newaxis,:], self.pca.eigenspectra))\n\n self.ss = np.fft.rfftfreq(self.pca.npix, d=self.emulator.dv)\n self.ss[0] = 0.01 # junk so we don't get a divide by zero error\n\n # Holders to store the convolved and resampled eigenspectra\n self.eigenspectra = np.empty((self.pca.m, self.ndata))\n self.flux_mean = np.empty((self.ndata,))\n self.flux_std = np.empty((self.ndata,))\n\n self.sigma_mat = self.sigma**2 * np.eye(self.ndata)\n self.mus, self.C_GP, self.data_mat = None, None, None\n\n self.lnprior = 0.0 # Modified and set by NuisanceSampler.lnprob\n\n # self.nregions = 0\n # self.exceptions = []\n\n # Update the outdir based upon id\n self.noutdir = Starfish.routdir + \"{}/{}/\".format(self.spectrum_id, self.order)\n\n\n def lnprob_Theta(self, p):\n '''\n Update the model to the Theta parameters and then evaluate the lnprob.\n\n Intended to be called from the master process via the command \"LNPROB\".\n '''\n try:\n self.update_Theta(p)\n lnp = self.evaluate() # Also sets self.lnprob to new value\n return lnp\n except C.ModelError:\n self.logger.debug(\"ModelError in stellar parameters, sending back -np.inf {}\".format(p))\n return -np.inf\n\n def evaluate(self):\n '''\n Return the lnprob using the current version of the C_GP matrix, data matrix,\n and other intermediate products.\n '''\n\n self.lnprob_last = self.lnprob\n\n X = 
(self.chebyshevSpectrum.k * self.flux_std * np.eye(self.ndata)).dot(self.eigenspectra.T)\n\n CC = X.dot(self.C_GP.dot(X.T)) + self.data_mat\n\n try:\n factor, flag = cho_factor(CC)\n except np.linalg.linalg.LinAlgError:\n print(\"Spectrum:\", self.spectrum_id, \"Order:\", self.order)\n self.CC_debugger(CC)\n raise\n\n try:\n R = self.fl - self.chebyshevSpectrum.k * self.flux_mean - X.dot(self.mus)\n\n logdet = np.sum(2 * np.log((np.diag(factor))))\n self.lnprob = -0.5 * (np.dot(R, cho_solve((factor, flag), R)) + logdet)\n\n self.logger.debug(\"Evaluating lnprob={}\".format(self.lnprob))\n return self.lnprob\n\n # To give us some debugging information about what went wrong.\n except np.linalg.linalg.LinAlgError:\n print(\"Spectrum:\", self.spectrum_id, \"Order:\", self.order)\n raise\n\n\n def update_Theta(self, p):\n '''\n Update the model to the current Theta parameters.\n\n :param p: parameters to update model to\n :type p: model.ThetaParam\n '''\n\n # durty HACK to get fixed logg\n # Simply fixes the middle value to be 4.29\n # Check to see if it exists, as well\n fix_logg = Starfish.config.get(\"fix_logg\", None)\n if fix_logg is not None:\n p.grid[1] = fix_logg\n print(\"grid pars are\", p.grid)\n\n self.logger.debug(\"Updating Theta parameters to {}\".format(p))\n\n # Store the current accepted values before overwriting with new proposed values.\n self.flux_mean_last = self.flux_mean.copy()\n self.flux_std_last = self.flux_std.copy()\n self.eigenspectra_last = self.eigenspectra.copy()\n self.mus_last = self.mus\n self.C_GP_last = self.C_GP\n\n # Local, shifted copy of wavelengths\n wl_FFT = self.wl_FFT * np.sqrt((C.c_kms + p.vz) / (C.c_kms - p.vz))\n\n # If vsini is less than 0.2 km/s, we might run into issues with\n # the grid spacing. Therefore skip the convolution step if we have\n # values smaller than this.\n # FFT and convolve operations\n if p.vsini < 0.0:\n raise C.ModelError(\"vsini must be positive\")\n elif p.vsini < 0.2:\n # Skip the vsini taper due to instrumental effects\n eigenspectra_full = self.EIGENSPECTRA.copy()\n else:\n FF = np.fft.rfft(self.EIGENSPECTRA, axis=1)\n\n # Determine the stellar broadening kernel\n ub = 2. * np.pi * p.vsini * self.ss\n sb = j1(ub) / ub - 3 * np.cos(ub) / (2 * ub ** 2) + 3. 
* np.sin(ub) / (2 * ub ** 3)\n # set zeroth frequency to 1 separately (DC term)\n sb[0] = 1.\n\n # institute vsini taper\n FF_tap = FF * sb\n\n # do ifft\n eigenspectra_full = np.fft.irfft(FF_tap, self.pca.npix, axis=1)\n\n # Spectrum resample operations\n if min(self.wl) < min(wl_FFT) or max(self.wl) > max(wl_FFT):\n raise RuntimeError(\"Data wl grid ({:.2f},{:.2f}) must fit within the range of wl_FFT ({:.2f},{:.2f})\".format(min(self.wl), max(self.wl), min(wl_FFT), max(wl_FFT)))\n\n # Take the output from the FFT operation (eigenspectra_full), and stuff them\n # into respective data products\n for lres, hres in zip(chain([self.flux_mean, self.flux_std], self.eigenspectra), eigenspectra_full):\n interp = InterpolatedUnivariateSpline(wl_FFT, hres, k=5)\n lres[:] = interp(self.wl)\n del interp\n\n # Helps keep memory usage low, seems like the numpy routine is slow\n # to clear allocated memory for each iteration.\n gc.collect()\n\n # Adjust flux_mean and flux_std by Omega\n Omega = 10**p.logOmega\n self.flux_mean *= Omega\n self.flux_std *= Omega\n\n\n\n # Now update the parameters from the emulator\n # If pars are outside the grid, Emulator will raise C.ModelError\n self.emulator.params = p.grid\n self.mus, self.C_GP = self.emulator.matrix\n\n\n\nclass SampleThetaPhi(Order):\n\n def initialize(self, key):\n # Run through the standard initialization\n super().initialize(key)\n\n # for now, start with white noise\n self.data_mat = self.sigma_mat.copy()\n self.data_mat_last = self.data_mat.copy()\n\n #Set up p0 and the independent sampler\n fname = Starfish.specfmt.format(self.spectrum_id, self.order) + \"phi.json\"\n phi = PhiParam.load(fname)\n\n # Set the regions to None, since we don't want to include them even if they\n # are there\n phi.regions = None\n\n #Loading file that was previously output\n # Convert PhiParam object to an array\n self.p0 = phi.toarray()\n\n jump = Starfish.config[\"Phi_jump\"]\n cheb_len = (self.npoly - 1) if self.chebyshevSpectrum.fix_c0 else self.npoly\n cov_arr = np.concatenate((Starfish.config[\"cheb_jump\"]**2 * np.ones((cheb_len,)), np.array([jump[\"sigAmp\"], jump[\"logAmp\"], jump[\"l\"]])**2 ))\n cov = np.diag(cov_arr)\n\n def lnfunc(p):\n # Convert p array into a PhiParam object\n ind = self.npoly\n if self.chebyshevSpectrum.fix_c0:\n ind -= 1\n\n cheb = p[0:ind]\n sigAmp = p[ind]\n ind+=1\n logAmp = p[ind]\n ind+=1\n l = p[ind]\n\n par = PhiParam(self.spectrum_id, self.order, self.chebyshevSpectrum.fix_c0, cheb, sigAmp, logAmp, l)\n\n self.update_Phi(par)\n\n # sigAmp must be positive (this is effectively a prior)\n # See https://github.com/iancze/Starfish/issues/26\n if not (0.0 < sigAmp): \n self.lnprob_last = self.lnprob\n lnp = -np.inf\n self.logger.debug(\"sigAmp was negative, returning -np.inf\")\n self.lnprob = lnp # Same behavior as self.evaluate()\n else:\n lnp = self.evaluate()\n self.logger.debug(\"Evaluated Phi parameters: {} {}\".format(par, lnp))\n\n return lnp\n\n\n def update_Phi(self, p):\n self.logger.debug(\"Updating nuisance parameters to {}\".format(p))\n\n # Read off the Chebyshev parameters and update\n self.chebyshevSpectrum.update(p.cheb)\n\n # Check to make sure the global covariance parameters make sense\n #if p.sigAmp < 0.1:\n # raise C.ModelError(\"sigAmp shouldn't be lower than 0.1, something is wrong.\")\n\n max_r = 6.0 * p.l # [km/s]\n\n # Create a partial function which returns the proper element.\n k_func = make_k_func(p)\n\n # Store the previous data matrix in case we want to revert later\n self.data_mat_last = 
self.data_mat\n self.data_mat = get_dense_C(self.wl, k_func=k_func, max_r=max_r) + p.sigAmp*self.sigma_mat", "_____no_output_____" ] ], [ [ "# Run the program.", "_____no_output_____" ] ], [ [ "model = SampleThetaPhi(debug=True)", "_____no_output_____" ], [ "model.initialize((0,0))", "INFO:SampleThetaPhi 21:Initializing model on Spectrum 0, order 0.\nDEBUG:SampleThetaPhi 21:Loading stored Chebyshev parameters.\n" ], [ "def lnprob_all(p):\n pars1 = ThetaParam(grid=p[0:3], vz=p[3], vsini=p[4], logOmega=p[5])\n model.update_Theta(pars1)\n # hard code npoly=3 (for fixc0 = True with npoly=4) !\n pars2 = PhiParam(0, 0, True, p[6:9], p[9], p[10], p[11])\n model.update_Phi(pars2)\n lnp = model.evaluate()\n return lnp", "_____no_output_____" ], [ "import emcee", "_____no_output_____" ], [ "start = Starfish.config[\"Theta\"]\nfname = Starfish.specfmt.format(model.spectrum_id, model.order) + \"phi.json\"\nphi0 = PhiParam.load(fname)", "_____no_output_____" ], [ "p0 = np.array(start[\"grid\"] + [start[\"vz\"], start[\"vsini\"], start[\"logOmega\"]] + \n phi0.cheb.tolist() + [phi0.sigAmp, phi0.logAmp, phi0.l])", "_____no_output_____" ], [ "p0", "_____no_output_____" ], [ "sampler = emcee.EnsembleSampler(32, 12, lnprob_all)", "_____no_output_____" ], [ "sampler.lnprobfn.f(p0)", "DEBUG:SampleThetaPhi 21:Updating Theta parameters to grid:[ 6.22800000e+03 4.26400000e+00 -3.42000000e-01] vz:-4.85 vsini:6.128 logOmega:-12.698 Av:0.0\nDEBUG:SampleThetaPhi 21:Updating nuisance parameters to spectrum_id:0 order:0 fix_c0:True cheb:[-0.018 -0.0176 -0.004 ] sigAmp:1.0 logAmp:-13.6 l:20.0 regions:None\nDEBUG:SampleThetaPhi 21:Evaluating lnprob=74382.87041812515\n" ], [ "p0.shape", "_____no_output_____" ], [ "p0.shape", "_____no_output_____" ], [ "p0_std = [5, 0.02, 0.02, 0.5, 0.5, -0.01, -0.005, -0.005, -0.005, 0.01, 0.001, 0.5]", "_____no_output_____" ], [ "nwalkers=36", "_____no_output_____" ], [ "p0_ball = emcee.utils.sample_ball(p0, p0_std, size=nwalkers)", "_____no_output_____" ], [ "p0_ball.shape", "_____no_output_____" ] ], [ [ "# This will take a while:", "_____no_output_____" ] ], [ [ "#val = sampler.run_mcmc(p0_ball, 10)", "_____no_output_____" ], [ "np.save('emcee_chain.npy',sampler.chain)\n\nprint(\"The end.\")", "_____no_output_____" ] ], [ [ "The end.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
c52d56ef4287eaf864211544e67737538d9e02c0
17,350
ipynb
Jupyter Notebook
01_RampUp/week2/pra02_control_flujos_parte_I_Pruebas_Talita.ipynb
talitacardenas/The_Bridge_School_DataScience_PT
7c059d06a0eb53c0370d1db8868e0e7cb88c857b
[ "Apache-2.0" ]
null
null
null
01_RampUp/week2/pra02_control_flujos_parte_I_Pruebas_Talita.ipynb
talitacardenas/The_Bridge_School_DataScience_PT
7c059d06a0eb53c0370d1db8868e0e7cb88c857b
[ "Apache-2.0" ]
null
null
null
01_RampUp/week2/pra02_control_flujos_parte_I_Pruebas_Talita.ipynb
talitacardenas/The_Bridge_School_DataScience_PT
7c059d06a0eb53c0370d1db8868e0e7cb88c857b
[ "Apache-2.0" ]
null
null
null
25.328467
344
0.442248
[ [ [ "# Condicionales IF...ELIF...ELSE\n\nnos permite realizar evaluaciones basada en respuestas TRUE o FALSE\n", "_____no_output_____" ] ], [ [ "if respuesta == 'SI' or respuesta == 'S' or respuesta == 'si':\n print(\"Continuar...\")\n print(\"Completado!\")\nelif respuesta == 'not' or respuesta == 'N' or respuesta == 'no':\n print(\"Salida!\")\nelse:\n print(\"Intentalo nuevamente....\")", "_____no_output_____" ] ], [ [ "# Vamos con el laboratorio 2\n\nEscribir un programa que convierta la temperatura de Fahrenheit a Celsius\n\nEn este código se muestra la fórmula matemática para convertir una temperatura medida en grados Fahrenheit a una temperatura medida en grados Celsius:\n\ncelsius = (fahrenheit - 32) * 5/9\n\nUse esta fórmula para compilar un programa que **solicite a los usuarios** una temperatura en grados Fahrenheit, realice la conversión a Celsius y, después, muestre la temperatura en grados Celsius.\n\nSi el usuario escribe el valor 55, el programa debe generar este resultado:\n\n*OUTPUT What is the temperature in fahrenheit? 55 Temperature in celsius is 12 *\n\nPero si el usuario escribe un valor no válido, el programa debe indicarle que hay un problema y, después, cerrar el programa. En este caso, si el usuario escribe el valor Bob, el programa debe generar este resultado:\n\nOUTPUT What is the temperature in fahrenheit? bob Input is not a number.\n\nNota\n\nPara comprobar el trabajo, use un conversor de temperatura en línea y confirme que el resultado del programa coincide con el del conversor.\n\nTanto si tiene dificultades y necesita echar un vistazo a la solución como si finaliza el ejercicio correctamente, continúe para ver una solución a este desafío.", "_____no_output_____" ] ], [ [ "a = 56\ntype(a)", "_____no_output_____" ], [ "fahrenheit = input(\"Cuál es la temperatura FAH de: \")", "Cuál es la temperatura FAH de: 55\n" ] ], [ [ "### GENERAR UN PSEUDOCÓDIGO\n```\nfah = introducir un valor numérico\nevaluamos este valor numèrico, y \nSI es valor NO ES numérico, \nELSE print (Este valor no es numèrico)\no reintenta\n\nSI el valor ES numérico\ncelsius = int(fah - 32) * 5/9\nprint(La temperatura es CELSIUS)\n\n```", "_____no_output_____" ] ], [ [ "type(fahrenheit) # la evaluación de un valor almacenado a través de un input, lo transforma en STRING (cadena de texto)", "_____no_output_____" ], [ "# celsius = (fahrenheit - 32) * 5/9", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ], [ "Aquí empieza la práctica - Talita", "_____no_output_____" ] ], [ [ "print (\"Por favor informar la temperatura en Fahrenheit\")\nfah = int(input())", "Por favor informar la temperatura en Fahrenheit\n100\n" ], [ "type(fah) # Para coprobar el tipo", "_____no_output_____" ], [ "celsius = int((int(fah)-32)*5/9)", "_____no_output_____" ], [ "celsius # por comprobar", "_____no_output_____" ], [ "type(celsius) # por comprobar", "_____no_output_____" ], [ "if type(fah) == int:\n print (\"Su temperatura es de\",celsius, \" Celsius.\")\nelse:\n print (fah, \"no es un valor numerico, informe un valor valido\")\n\n", "Su temperatura es de 148 Celsius.\n" ], [ "", "_____no_output_____" ] ], [ [ "Segundo test", "_____no_output_____" ] ], [ [ "print (\"Por favor informar la temperatura en Fahrenheit\")\nfah2 = input()", "Por favor informar la temperatura en Fahrenheit\nBA\n" ], [ "type(fah2)", "_____no_output_____" ], [ "fah4 = int(fah2)", "_____no_output_____" ], [ "type (fah4)", "_____no_output_____" ], [ "celsius2 = int((int(fah4)-32)*5/9)", "_____no_output_____" ], [ "numb = 
list(range(0,1000))", "_____no_output_____" ], [ "fah4 in numb #Por comprobar", "_____no_output_____" ], [ "if fah4 in numb:\n print(\"Su temperatura es de \", celsius2,\" Celsius.\")\nelse:\n print (fah2, \"no es un valor numerico, informe un valor valido.\")", "Su temperatura es de 426 Celsius.\n" ], [ "", "_____no_output_____" ] ], [ [ "Tercer tentativa", "_____no_output_____" ] ], [ [ "print(\"Por favor informar la temperatura en Fahrenheit :\")\nfah3 = input()", "Por favor informar la temperatura en Fahrenheit :\n800\n" ], [ "type (fah3)", "_____no_output_____" ], [ "celsius3 = int((int(fah3)-32)*5/9)", "_____no_output_____" ], [ "if type(int(fah3))== int:\n print (\"Su temperatura es de \", celsius3,\" Celsius.\")\nelse:\n print (fah3, \"no es un valor numerico, informe un valor valido.\")", "Su temperatura es de 426 Celsius.\n" ] ], [ [ "# es tarde no estoy segura si soy capaz\nSi determino a principio que el input es un integer, cuando intento poner letras me da error, pero si no determino quees un integer a principios, no reconoce en en momento de la cmprobación.\nHe intentado con lista de rango, pero tampoco me sirve.\nHe pensado que la comprobacion debe ir a principios, en el momento que inputa el dato, pero no lo sé hacerlo.\nSi la intención es que el usário informe lo que quiera, no veo como ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
c52d58a6f5cc4437bc69821a0fc1f74678f67e0e
17,600
ipynb
Jupyter Notebook
content/assignments/mid_proj.ipynb
yonboyjohnjoy/ledatascifi-2022
9d926b0f4329b09f2b662ccacc40b5844da82715
[ "MIT" ]
null
null
null
content/assignments/mid_proj.ipynb
yonboyjohnjoy/ledatascifi-2022
9d926b0f4329b09f2b662ccacc40b5844da82715
[ "MIT" ]
null
null
null
content/assignments/mid_proj.ipynb
yonboyjohnjoy/ledatascifi-2022
9d926b0f4329b09f2b662ccacc40b5844da82715
[ "MIT" ]
null
null
null
69.291339
464
0.671761
[ [ [ "# Midterm aka Assignment 5 - Our first real data science project\n\n```{admonition} Tips\n:class: tip\n1. Read all instructions before starting.\n1. Start early. Work on the components of the project in parallel with related class discussions.\n1. RECHECK THESE INSTRUCTIONS BEFORE SUBMITTING\n```\n\n```{warning}\nPer the [syllabus](../about/gradeoverview), this project is 10% if your overall grade, which is about 2x the weight of a typical assignment. It will probably take 2-3x the time of a typical assignment.\n\n**Really fun news:** This is a end-to-end data science project! You will be downloading a lot of files, parsing/exploring/cleaning those file, and then exploring the data. \n\nBUT: It will take time! If you start the day before it is due, YOU WILL NOT FINISH IT. If you start two days before it is do, you might finish it, but it will not be done well.\n```", "_____no_output_____" ], [ "## Project Set Up\n\nThe nuts and bolts of the set up are: \n\n- Basic question: What \"types\" of firms were hurt more or less by covid?\n- Specific questions: What risk factors were associated with better/worse stock returns around the onset of covid?\n - This is called a \"cross-sectional event study\"\n - Expected minimum output: Scatterplot (x = some \"risk factors\", y = returns around March 2020) with regression lines; formatted well\n - Discussion of the economics linking the your risk factors to the returns is expected\n - Pro output: Regression tables, heatmaps, better scatterplots \n- New data science technique: Textual analysis. We will estimate \"risk factors\" from the text of S&P 500 firm's 10-K filings. \n - More on this below\n- Data needed:\n - Returns: Stock returns for S&P 500 firms can be pulled from Yahoo\n - Risk factors for each firm will be created from their 10-K filings. \n \nSo your main challenge... is to create variables that measure risks for each firm. \n \n \n## Steps to complete the assignment\n \n```{dropdown} 1. Start the assignment \n\n- As usual, click the link I provide in the discussion board. \n- But unlike before, the repo will be essentially empty. This is a start to finish project, so I'm letting you dictate the structure of the files.\n- Clone this to your computer.\n```\n\n````{dropdown} 2. Edit **.gitignore** \n\nThe `download_text_files.ipynb` file will create a large data structure in a subfolder called `text_files/` with all the downloaded 10-K files. There will be several gigs of data in this folder. We don't want to save/push all these files to github!\n\n```{warning}\nSo add this directory (`text_files/`) to your gitignore before you proceed!\n```\n\n````\n\n\n````{dropdown} 3. Create **download_text_files.ipynb** \n\nThis file \n1. Should create a subfolder for inputs (`inputs/`). You should probably save the S&P500 list from the wikipedia page there. \n1. Should create another subfolder (`text_files/`) to hold all the text files you download. Because scraping can generate large amounts of files, I usually put it in a dedicated input folder instead of the generic input folder we just made.\n\n**Tips/recommendations**\n\n1. Try to download just one 10-K at first. When you can successfully do that, try a few more, one at a time. Check the folders on your computer - did they download like you expected? Are the files correct? If yes, continue. If not, you have an error to fix. \n1. The website has really good info on \"building a spider.\" Highly recommend! \n1. \n\n```{tip}\nWhen you are confident the program works, \n1. 
Delete your whole `text_files/` and `input/` subfolders on your computer so you have a \"fresh start\" \n2. Rerun this from scratch. \n3. Rerun the file AGAIN (but don't delete the files you have). Does the file work after it's already been run, or partially completed it's work? Real spiders have to resume where they left off. You might need to make some conditional tweaks to the file to account for this. You don't want the code to actually re-download the data, but the code should still run without error!\n```\n\n````\n\n```{dropdown} 4. IMPORTANT: Create **screenshot.png**\n\nIt's not polite to upload so much data to GitHub. It takes up space on the server, and your collaborators/peer reviewers will have to download them all when they clone your repo. \n\nThat's why you edited the gitignore before doing all those downloads. If you did it correctly and check Github Desktop, you won't see any of the text files!\n\n1. Now that your `download_text_files.ipynb` is done running, push the repo. Even though your _computer_ has a `/text_files/*` folder on it with many files and some hard drive space used, the repo in your browser doesn't show this at all! Good job!\n2. **Create `screenshot.png`. The purpose of this is to upload proof of the files for your reviewers.**\n\nRight click your `text_files` folder so it shows the number of files inside of it, and take a screenshot showing this. Save it as `screenshot.png` inside your repo. \n```\n\n\n```{dropdown} 5. Download **near_regex.py** from the community codebook into your repo\n\nThis will be used in the next step.\n```\n\n\n````{dropdown} 6. Create **measure_risk.ipynb**\n\nThe basic idea is to measure risks by counting the number of times a given risk topic is discussed in the 10-K.\n\nThis file (broad steps)\n1. Creates an `output/` folder \n1. Loads the initial dataset of sample firms saved inside of `input/`.\n1. For each firm, load the corresponding 10-K and create (at least) 5 different risk measures, and save those new measurements to each of 5 new variables in that row. \n 1. **Pick one risk type, and think of three ways to measure it.** For example, there are many ways you could try to measure \"antitrust risk\", so come up with 3 different ways to measure it from the text. You can try different terms, different combinations of terms, different limits on how close terms need to be, and more. Comparing these different ways might help you understand how your choices can improve or hurt the value of your measurement. \n 2. **Pick a second risk type and create a single measure for it** (you only need to do one measurement on this risk type, but you can do more)\n 3. **Pick a third risk type and create a single measure for it** (again, you only need to do one, but you can do more)\n 4. Bonus measures - interesting variables you could also measure:\n - The total length of the document (# of words)\n - The # of unique words (similar to total length)\n - The \"tone\" of the document\n2. Downloads 2019 accounting data (**2019 ccm_cleaned.dta**) from the data folder in the class repo on S&P500 firms (possibly useful in analysis) and adds them to the dataset \n1. Save the whole thing to `output/sp500_accting_plus_textrisks.csv`\n\n```{note}\n[There is a bunch more on this file/step here.](asgn05_measurerisk)\n```\n\n```{tip}\nWhen you are confident the program works, delete your whole `output/` folder on your computer so you have a \"fresh start\" and then rerun this from scratch. \n```\n\n````\n\n````{dropdown} 7. 
Create **explore_ugly.ipynb** to see if your risk factors were associated with higher or lower returns around covid.\n\nTry to figure out how to do the analysis below, downloading and intergrating return measures. Play around in this file. No one will look at it. It's a safe space.\n\nIf you find issues with your risk measurements or come up with improvements you think you should make, go back and work on the previous file more. \n\nYou can and should use this file to figure out what you want to include in the final report and how you want it to appear.\n\n````\n\n\n````{dropdown} 8. Create **analysis_report.ipynb** \n\n```{important}\nThis is the main portion of your grade. It should be well formatted and clean in terms of text, code, and output. Don't show extraneous print statements. Treat it like a Word document that happens to have some code (but just enough to do the analysis and show outputs). I've included more thoughts in the next dropdown. \n```\n\n```{tip}\nFirst compute the returns for the 3/9-3/13 week. This will give you a dataset with one row per firm, and one number per row (the return for that week). Then merge this into the analysis dataset. Rinse and repeat if you try for the other return measures I describe below. \n```\n\n1. Load `output/sp500_accting_plus_textrisks.csv`\n1. Explain and describe to readers your risk measurements \n - How were they measured? (Mechanical description)\n - Why did you choose them and what do you hope they capture? (Economic reasoning)\n - What are their statistical properties? (Do you have values for most/all firms, they should have variation within them, are they correlated with any accounting measures)\n1. **Validation checks and discussion of the risk measurements** **This step (validating the measurement) is very important in production quality analysis!***\n - Discuss briefly whether these measurements are likely \"valid\" in the sense they capture what you hope. \n - Present some evidence they do capture your hopes. There are many ways to do this, and depend on the data you have and the risks you're measuring. \n - You might print out a few examples of matches.\n - One option is to show sentences that will correctly be caught by the search, and correctly not caught. And how easy is it for your search to find a sentence that matches the search but shouldn't.. (Hopefully: not too easy!) How easy is it for your search to miss a sentence that it should match...\n - One option is to output the list of firms that have high scores, or the industries that have high and low scores. Does the output make sense? \n1. Describe the _final_ sample for your tests, the set of observations where you have all the data you need. \n - This includes summary stats,the number of firms, and other things EDA would turn up\n - Are there any caveats about the sample and/or data? If so, mention them and briefly discuss possible issues they raise with the analysis. \n1. Explore the correlation between your risk values and stock returns around key dates for the onset of covid. \n - Stock returns are in the class's data folder (\"2019-2020-stock_rets cleaned.zip\")\n - Get the firm's returns for the week of Mar 9 - Mar 13, 2020 (the cumulative return for the week)\n - Bonus: repeat the analysis but use the cumulative returns from Feb 23-Mar 23 as the \"collapse period\"\n - Bonus: repeat the analysis but use Mar 24 as the \"stimmy day\" (stimulus was announced) ... 
how does this change your results, and is it doing so in a predictable way?\n - Bonus: repeat the analysis, but use firm accounting variables: Some of these probably indicate that a firm should be more [resilient to the crisis](https://privpapers.ssrn.com/sol3/papers.cfm?abstract_id=3597838)! \n - Present your findings visually and follow the lessons on effective visualization!\n - You should write brief summaries of your findings. \n1. Bonus: Explore the risk-return relationship, but use regressions so that you can control for firm traits and market returns. Does this change your results? \n - Don't worry about printing these regressions out \"pretty\", just try them if you want!\n1. Bonus: Use **alpha** as y, not returns, in your plots and/or regressions. This will likely change the results. \n - Step 1: Separately, for each firm, [estimate the beta and factor loadings](https://ledatascifi.github.io/ledatascifi-2022/content/05/05c_factorloadings.html) of each firm's returns in **2019**. Save that data.\n - Step 2: For firm i on date t, alpha(i,t) = ret(i,t) - beta(of firm i)*mkt_return(t) - SMB(of firm i)*SMB_port_ret(t) - HML(of firm i)*HML_port_ret(t)\n - _SMB_port_ret(t) is the return on the SMB portfolio on date t, which you can get from the Fama-French datasets!_\n - Just present the findings if you do this. Don't worry about explaining it - but it might make more sense in a few weeks!\n\n```{note}\nIf you want to do any regressions, let me know. I'll give you a few pointers. \n```\n\n````\n\n````{dropdown} 9. Finalize and polish\n\nUnlike previous assignments, how clean your code and report are will factor into your grade. Additionally, your README file should be nice!\n\n**Edit the readme file - it should be \"publication ready\"**\n- Make the readme file informative and professional. \n- Inform readers of the order in which files should be run. And warn users that this folder will download X files of X MB or GB.\n- Change the title of it (not the filename, the title at the top)\n- Describe the purpose of this repo (what this repo is analyzing) and the key inputs\n- List any necessary packages (might a reader need to `pip install` anything?) or steps a visitor will need to run to make it work on their computer \n\n**The `analysis_report` file should be written and formatted like an executive report.** \n- There is no \"page expectation\" or \"page limit\". Aim to provide sufficient analysis and explanation, but in a concise and clear way. Bullet points are fine in places, but you should have a few places with paragraph-style discussion, especially where you explain why you chose the specific risks, the way you defined them, and what issues you think they have (which points the way forward on \"extensions\"). \n- In other words: You will be graded on how much this looks like a professional report. Just \"dumping\" endless printouts is not as valuable as well-tailored tables and figures. High quality and concise reporting is an A1 emphasis. **Here, pretty, smart, and effective tables and visualizations will receive higher grades.** \n- **The teaching team will _not_ read your measure_risk file other than to comment on code style.** So: \n 1. Any details in that file on search terms and descriptive information on your text-based measures should be copied into your analysis file (with appropriate adjustments to suit how a report would be presented). \n 2. Make the measurement code easy to read, because we will grade the code style. 
\n \n````", "_____no_output_____" ], [ "## Cheers!\n\n**Give yourself a big round of applause at this point!**\n\nYour code is probably very flexible and powerful at this point. If you have the appetite + a larger list of EDGAR files to download + a large enough hard drive + and time, then you could download more than 100GB of 10-K filings and run textual analysis across 20+ years of data for all publicly traded firms. \n\nSeriously: You are in the ball park of pulling off any analysis you want that needs to harness the power of these filings. These four studies are variously provocative, great, and (in one case) mine:\n- [Check this claim: Identifying changes in 10-K/Q filings can generate a 20% alpha](https://papers.ssrn.com/sol3/papers.cfm?abstract_id=1658471)\n- [Prof. Hanley measured emerging risks in the financial sector](https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2792943)\n- [Build a unique list of competitors for each firm (really powerful!)](https://papers.ssrn.com/sol3/papers.cfm?abstract_id=1520062)\n- [I used 10-K text to identify public rivals of startup firms](https://ssrn.com/abstract=3245839)", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown" ] ]
c52d603f9abea53c0ed338905804697c5679962f
611,457
ipynb
Jupyter Notebook
examples/tidal_example.ipynb
ryancoe/MHKiT-Python
e2564d6f22a4a19aadd29568838366e58741f88c
[ "BSD-3-Clause" ]
null
null
null
examples/tidal_example.ipynb
ryancoe/MHKiT-Python
e2564d6f22a4a19aadd29568838366e58741f88c
[ "BSD-3-Clause" ]
3
2020-06-23T17:16:38.000Z
2021-01-20T23:43:28.000Z
examples/tidal_example.ipynb
ryancoe/MHKiT-Python
e2564d6f22a4a19aadd29568838366e58741f88c
[ "BSD-3-Clause" ]
null
null
null
1,916.793103
212,364
0.96063
[ [ [ "# MHKiT Tidal Module\n\nThe following example will familiarize the user with the [MHKiT tidal module](https://mhkit-software.github.io/MHKiT/mhkit-python/api.tidal.html) by stepping through the calculation of the velocity duration curve. The data file used in this example is stored in the [\\\\\\\\MHKiT\\\\\\\\examples\\\\\\\\data](https://github.com/MHKiT-Software/MHKiT-Python/tree/master/examples/data) directory.\n\nStart by importing the necessary MHKiT module.", "_____no_output_____" ] ], [ [ "from mhkit import tidal", "_____no_output_____" ] ], [ [ "## Loading Data from NOAA-Currents\n \nThis example uses 1 year of data from the NOAA-Currents sites. A map of available currents stations is available at https://tidesandcurrents.noaa.gov/map/. The tidal io module includes two functions to import data: `request_noaa_data` which pulls data from the website, and `read_noaa_json` which loads a JSON file. The request function can save the JSON file for later use. \n\nFor simplicity, this example loads data from a JSON file into a pandas DataFrame. This data contains 1 year of 6 minute averaged data from the Southampton Shoal Channel LB 6 (Station Number: s08010) in San Francisco Bay. The data includes 6 minute averaged direction [degrees] and speed [cm/s] indexed by time. The DataFrame key names returned by NOAA are 'd' for direction and 's' for speed. Since MHKIT uses SI units, speed is converted to m/s. ", "_____no_output_____" ] ], [ [ "# Load tidal data, South Hampton Shoal LB 6\ndata, metadata = tidal.io.read_noaa_json('data/tidal/s08010.json')\n\n# Convert discharge data from cm/s to m/s\ndata.s = data.s / 100\n\n# Print data\nprint(data)", " s d b\n2016-11-08 12:04:00 0.673 358 4\n2016-11-08 12:34:00 0.689 360 4\n2016-11-08 12:46:00 0.738 356 4\n2016-11-08 12:58:00 0.744 359 4\n2016-11-08 13:10:00 0.648 358 4\n... ... ... ..\n2018-04-01 22:02:00 0.089 296 4\n2018-04-01 22:14:00 0.102 356 4\n2018-04-01 22:26:00 0.011 3 4\n2018-04-01 22:38:00 0.060 193 4\n2018-04-01 23:20:00 0.439 165 4\n\n[18890 rows x 3 columns]\n" ] ], [ [ "The data can also be obtained using the function `request_noaa_data` in the tidal IO module. \nTo use this function, we need a station number, parameter type, start date, and end date.\nThe station number can be found on the NOAA tides and currents website linked above. \nThe IEC standard recommends 1 year of 10-minute direction and velocity data. The request function allows users to easily pull any timeframe of data although NOAA limits any one pull to 30 days.\n\nThe following code, which has been commented out for this demonstration, can be used to pull data from the NOAA website. This function can be used to save data to a JSON for later use.", "_____no_output_____" ] ], [ [ "#data, metadata = tidal.io.request_noaa_data(station='s08010', parameter='currents',\n# start_date='20161101', end_date='20180401',\n# proxy=None, write_json=`data/s08010.json`)", "_____no_output_____" ] ], [ [ "## Principal Flow Directions\nAs an initial check on the data, a velocity plot can be created to identify data gaps. To consider the velocity in one of the principal flow directions we apply the `principal_flow_directions` function. This function returns 2 directions (in degrees) corresponding to the flood and ebb directions of the tidal site. Principal flow directions are calculated based on the highest frequency directions. 
These directions are often close to 180 degrees apart but are not required to be.\n\nThe `plot_current_timeseries` function plots velocity in either direction using the speed timeseries. ", "_____no_output_____" ] ], [ [ "# Specify histogram bin width for directions to calculate the principal flow directions \nwidth_direction = 1 # in degrees\n\n# Compute two principal flow directions\ndirection1, direction2 = tidal.resource.principal_flow_directions(data.d, width_direction)\n\n# Set flood and ebb directions based on site knowledge\nflood = direction1 # Flow into \nebb = direction2 # Flow out ", "_____no_output_____" ] ], [ [ "The time series of current data can be plotted using the `plot_current_timeseries` function, which can include either the flood or ebb directions.", "_____no_output_____" ] ], [ [ "ax = tidal.graphics.plot_current_timeseries(data.d, data.s, flood)", "_____no_output_____" ] ], [ [ "The plot above shows missing data for most of early and mid-2017. The IEC standard recommends a minimum of 1 year of 10 minute averaged data (See IEC 201 for full description). For the demonstration, this dataset is sufficient. To look at a specific month we can slice the dataset before passing to the plotting function.", "_____no_output_____" ] ], [ [ "# Slice December of 2017 out of the full dataset\ndec17_data = data.loc['2017-12-01':'2017-12-31']\n\n# Plot December of 2017 as current timeseries\nax = tidal.graphics.plot_current_timeseries(dec17_data.d, dec17_data.s, flood)", "_____no_output_____" ] ], [ [ "## Joint Probability Distribution\n\nDirection and velocity can be viewed as a joint probability distribution on a polar plot. This plot helps visually show the flood and ebb directions and the frequency of particular directional velocities. ", "_____no_output_____" ] ], [ [ "# Set the joint probability bin widths\nwidth_direction = 1 # in degrees\nwidth_velocity = 0.1 # in m/s\n\n# Plot the joint probability distribution\nax = tidal.graphics.plot_joint_probability_distribution(data.d, data.s, \\\n width_direction, width_velocity, metadata=metadata, flood=flood, ebb=ebb)", "_____no_output_____" ] ], [ [ "## Rose plot\n\nA rose plot shows the same information as the joint probability distribution but the probability is now the r-axis, and the velocity is the contour value. As compared to a joint probability distribution plot, a rose plot can be more readable when using larger bins sizes.", "_____no_output_____" ] ], [ [ "# Define bin sizes\nwidth_direction = 10 # in degrees\nwidth_velocity = 0.25 # in m/s\n\n# Create a rose plot\nax = tidal.graphics.plot_rose(data.d, data.s, width_direction, \\\n width_velocity, metadata=metadata, flood=flood, ebb=ebb)", "_____no_output_____" ] ], [ [ "## Velocity Duration Curve\n\nThe velocity duration curve shows the probability of achieving a particular velocity value. After computing the exceedance probability, the rank order of velocity values can be plotted as follows.", "_____no_output_____" ] ], [ [ "# Calculate exceedance probability of data\ndata['F'] = tidal.resource.exceedance_probability(data.s)\n\n# Plot the velocity duration curve (VDC)\nax = tidal.graphics.plot_velocity_duration_curve(data.s, data.F)", "_____no_output_____" ] ] ]
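For intuition about what `exceedance_probability` represents, a rough manual version is sketched below: rank the speeds from fastest to slowest and convert the rank to a percentage. This is only an illustration of the idea; MHKiT's `tidal.resource.exceedance_probability` should be used in practice, and its exact ranking and NaN handling may differ from this sketch.

```python
import pandas as pd

def exceedance_probability_manual(speed: pd.Series) -> pd.Series:
    """Approximate exceedance probability: the percentage of time a given
    velocity value is met or exceeded (plotting-position style estimate)."""
    ranks = speed.rank(method='max', ascending=False)   # fastest value gets rank 1
    n = speed.notna().sum()
    return ranks / (n + 1) * 100

# Example usage on the NOAA speed series loaded above (assumed already in m/s)
# data['F_manual'] = exceedance_probability_manual(data.s)
```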
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
c52d78bd0540b876519ed49598e8426305ce548c
6,787
ipynb
Jupyter Notebook
Syllabus.ipynb
anderson-github-classroom/syllabus-assignment-sarahkurd
c2cfb70b003fdca92e90fcb6523cfea5d8ac529b
[ "MIT" ]
null
null
null
Syllabus.ipynb
anderson-github-classroom/syllabus-assignment-sarahkurd
c2cfb70b003fdca92e90fcb6523cfea5d8ac529b
[ "MIT" ]
null
null
null
Syllabus.ipynb
anderson-github-classroom/syllabus-assignment-sarahkurd
c2cfb70b003fdca92e90fcb6523cfea5d8ac529b
[ "MIT" ]
null
null
null
23.565972
386
0.507883
[ [ [ "# Name(s)\n**Sarah Kurdoghlian**", "_____no_output_____" ], [ "**Instructions:** This is an individual assignment. Complete the following code and push to get your score.", "_____no_output_____" ], [ "I am providing the autograder answers locally so you may test your code before pushing. I will be reviewing your submissions, and if I find you are circumventing the autograder in any manner, you will receive a 0 on this assignment and your case will be reported to the honor board for review. i.e., approach the assignment in a genuine manner and you have nothing to worry about.", "_____no_output_____" ], [ "**Question 1.**\nWhen will new material be available each week?", "_____no_output_____" ], [ "You can answer the question by defining an anonymous function. This creates a function that I can test using pytest. You don't have to worry about the details. You just need to answer the question by changing the string argument that is currently set to \"D\". I know this is a bit weird, but I want you to get used to submitting code as early as possible.", "_____no_output_____" ] ], [ [ "# Nothing to modify in this cell\ndef question_1(answer):\n answers = {\n \"A\": \"Monday morning\",\n \"B\": \"Sunday night\",\n \"C\": \"Monday evening\",\n \"D\": \"I don't know\"\n }\n try:\n return answers[answer]\n except:\n return \"Not a valid answer\"", "_____no_output_____" ], [ "# YOUR SOLUTION HERE\n# Sample incorrect answer\nanswer_question_1 = lambda: question_1(\"C\")", "_____no_output_____" ] ], [ [ "**Question 2.**\nDo I need to buy the textbook?", "_____no_output_____" ] ], [ [ "# Nothing to modify in this cell\ndef question_2(answer):\n answers = {\n \"A\": \"No\",\n \"B\": \"Maybe\",\n \"C\": \"Yes. You will struggle with some of the chapters without the textbook\",\n }\n try:\n return answers[answer]\n except:\n return \"Not a valid answer\"", "_____no_output_____" ], [ "# YOUR SOLUTION HERE\n# Sample incorrect answer\nanswer_question_2 = lambda: question_2(\"C\")", "_____no_output_____" ] ], [ [ "**Question 3.**\nAre these any required times that I be online?", "_____no_output_____" ] ], [ [ "# Nothing to modify in this cell\ndef question_3(answer):\n answers = {\n \"A\": \"Yes\",\n \"B\": \"No\"\n }\n try:\n return answers[answer]\n except:\n return \"Not a valid answer\"", "_____no_output_____" ], [ "# YOUR SOLUTION HERE\n# Sample incorrect answer\nanswer_question_3 = lambda: question_3(\"A\")", "_____no_output_____" ] ], [ [ "**Question 4.**\nWhat software will I use to complete the assignments?", "_____no_output_____" ] ], [ [ "# Nothing to modify in this cell\ndef question_4(answer):\n answers = {\n \"A\": \"Java\",\n \"B\": \"Netbeans\",\n \"C\": \"Anaconda\"\n }\n try:\n return answers[answer]\n except:\n return \"Not a valid answer\"", "_____no_output_____" ], [ "# YOUR SOLUTION HERE\n# Sample incorrect answer\nanswer_question_4 = lambda: question_4(\"C\")", "_____no_output_____" ] ], [ [ "**Question 5.**\nDo I need to participate in this class or can I just do the labs and assignments?", "_____no_output_____" ] ], [ [ "# Nothing to modify in this cell\ndef question_5(answer):\n answers = {\n \"A\": \"Yes. 
If you want to get anything higher than a C, you'll need to do more than the labs and assignments\",\n \"B\": \"No\",\n }\n try:\n return answers[answer]\n except:\n return \"Not a valid answer\"", "_____no_output_____" ], [ "# YOUR SOLUTION HERE\n# Sample incorrect answer\nanswer_question_5 = lambda: question_5(\"A\")", "_____no_output_____" ], [ "# Don't forget to push!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
c52d80e47f970ce54e4ef8e28f52d2bde451f44e
82,013
ipynb
Jupyter Notebook
notebooks/04_Linear_Regression.ipynb
glazec/practicalAI
71128f168ea6e1851847981e394cc8d509949c59
[ "MIT" ]
1
2020-11-28T04:44:05.000Z
2020-11-28T04:44:05.000Z
notebooks/04_Linear_Regression.ipynb
glazec/practicalAI
71128f168ea6e1851847981e394cc8d509949c59
[ "MIT" ]
null
null
null
notebooks/04_Linear_Regression.ipynb
glazec/practicalAI
71128f168ea6e1851847981e394cc8d509949c59
[ "MIT" ]
null
null
null
68.860621
29,258
0.71874
[ [ [ "# Linear Regression", "_____no_output_____" ], [ "<img src=\"https://raw.githubusercontent.com/glazec/practicalAI/master/images/logo.png\" width=150>\n\nIn this lesson we will learn about linear regression. We will first understand the basic math behind it and then implement it in Python. We will also look at ways of interpreting the linear model.\n\n", "_____no_output_____" ], [ "# Overview", "_____no_output_____" ], [ "<img src=\"https://raw.githubusercontent.com/glazec/practicalAI/master/images/linear.png\" width=250>\n\n$\\hat{y} = XW$\n\n*where*:\n* $\\hat{y}$ = prediction | $\\in \\mathbb{R}^{NX1}$ ($N$ is the number of samples)\n* $X$ = inputs | $\\in \\mathbb{R}^{NXD}$ ($D$ is the number of features)\n* $W$ = weights | $\\in \\mathbb{R}^{DX1}$ ", "_____no_output_____" ], [ "* **Objective:** Use inputs $X$ to predict the output $\\hat{y}$ using a linear model. The model will be a line of best fit that minimizes the distance between the predicted and target outcomes. Training data $(X, y)$ is used to train the model and learn the weights $W$ using stochastic gradient descent (SGD).\n* **Advantages:**\n * Computationally simple.\n * Highly interpretable.\n * Can account for continuous and categorical features.\n* **Disadvantages:**\n * The model will perform well only when the data is linearly separable (for classification).\n * Usually not used for classification and only for regression.\n* **Miscellaneous:** You can also use linear regression for binary classification tasks where if the predicted continuous value is above a threshold, it belongs to a certain class. But we will cover better techniques for classification is future lessons and will focus on linear regression for continuos regression tasks only.\n", "_____no_output_____" ], [ "# Training", "_____no_output_____" ], [ "*Steps*: \n1. Randomly initialize the model's weights $W$.\n2. Feed inputs $X$ into the model to receive the predictions $\\hat{y}$.\n3. Compare the predictions $\\hat{y}$ with the actual target values $y$ with the objective (cost) function to determine loss $J$. A common objective function for linear regression is mean squarred error (MSE). This function calculates the difference between the predicted and target values and squares it. (the $\\frac{1}{2}$ is just for convenicing the derivative operation).\n * $MSE = J(\\theta) = \\frac{1}{2}\\sum_{i}(\\hat{y}_i - y_i)^2$\n4. Calculate the gradient of loss $J(\\theta)$ w.r.t to the model weights.\n * $J(\\theta) = \\frac{1}{2}\\sum_{i}(\\hat{y}_i - y_i)^2 = \\frac{1}{2}\\sum_{i}(X_iW - y_i)^2 $\n * $\\frac{\\partial{J}}{\\partial{W}} = X(\\hat{y} - y)$\n4. Apply backpropagation to update the weights $W$ using a learning rate $\\alpha$ and an optimization technique (ie. stochastic gradient descent). The simplified intuition is that the gradient tells you the direction for how to increase something so subtracting it will help you go the other way since we want to decrease loss $J(\\theta)$.\n * $W = W- \\alpha\\frac{\\partial{J}}{\\partial{W}}$\n5. 
Repeat steps 2 - 4 until model performs well.", "_____no_output_____" ], [ "# Data", "_____no_output_____" ], [ "We're going to create some simple dummy data to apply linear regression on.", "_____no_output_____" ] ], [ [ "from argparse import Namespace\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd", "_____no_output_____" ], [ "# Arguments\nargs = Namespace(\n seed=1234,\n data_file=\"sample_data.csv\",\n num_samples=100,\n train_size=0.75,\n test_size=0.25,\n num_epochs=100,\n)\n\n# Set seed for reproducability\nnp.random.seed(args.seed)", "_____no_output_____" ], [ "# Generate synthetic data\ndef generate_data(num_samples):\n X = np.array(range(num_samples))\n y = 3.65*X + 10\n return X, y", "_____no_output_____" ], [ "# Generate random (linear) data\nX, y = generate_data(args.num_samples)\ndata = np.vstack([X, y]).T\ndf = pd.DataFrame(data, columns=['X', 'y'])\ndf.head()", "_____no_output_____" ], [ "# Scatter plot\nplt.title(\"Generated data\")\nplt.scatter(x=df[\"X\"], y=df[\"y\"])\nplt.show()", "_____no_output_____" ] ], [ [ "# Scikit-learn implementation", "_____no_output_____" ], [ "**Note**: The `LinearRegression` class in Scikit-learn uses the normal equation to solve the fit. However, we are going to use Scikit-learn's `SGDRegressor` class which uses stochastic gradient descent. We want to use this optimization approach because we will be using this for the models in subsequent lessons.", "_____no_output_____" ] ], [ [ "# Import packages\nfrom sklearn.linear_model.stochastic_gradient import SGDRegressor\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "# Create data splits\nX_train, X_test, y_train, y_test = train_test_split(\n df[\"X\"].values.reshape(-1, 1), df[\"y\"], test_size=args.test_size, \n random_state=args.seed)\nprint (\"X_train:\", X_train.shape)\nprint (\"y_train:\", y_train.shape)\nprint (\"X_test:\", X_test.shape)\nprint (\"y_test:\", y_test.shape)", "X_train: (75, 1)\ny_train: (75,)\nX_test: (25, 1)\ny_test: (25,)\n" ] ], [ [ "We need to standardize our data (zero mean and unit variance) in order to properly use SGD and optimize quickly.", "_____no_output_____" ] ], [ [ "# Standardize the data (mean=0, std=1) using training data\nX_scaler = StandardScaler().fit(X_train)\ny_scaler = StandardScaler().fit(y_train.values.reshape(-1,1))\n\n# Apply scaler on training and test data\nstandardized_X_train = X_scaler.transform(X_train)\nstandardized_y_train = y_scaler.transform(y_train.values.reshape(-1,1)).ravel()\nstandardized_X_test = X_scaler.transform(X_test)\nstandardized_y_test = y_scaler.transform(y_test.values.reshape(-1,1)).ravel()\n\n\n# Check\nprint (\"mean:\", np.mean(standardized_X_train, axis=0), \n np.mean(standardized_y_train, axis=0)) # mean should be ~0\nprint (\"std:\", np.std(standardized_X_train, axis=0), \n np.std(standardized_y_train, axis=0)) # std should be 1", "mean: [8.22952817e-17] -1.5617137213060536e-16\nstd: [1.] 
0.9999999999999999\n" ], [ "# Initialize the model\nlm = SGDRegressor(loss=\"squared_loss\", penalty=\"none\", max_iter=args.num_epochs)", "_____no_output_____" ], [ "# Train\nlm.fit(X=standardized_X_train, y=standardized_y_train)", "_____no_output_____" ], [ "# Predictions (unstandardize them)\npred_train = (lm.predict(standardized_X_train) * np.sqrt(y_scaler.var_)) + y_scaler.mean_\npred_test = (lm.predict(standardized_X_test) * np.sqrt(y_scaler.var_)) + y_scaler.mean_", "_____no_output_____" ] ], [ [ "# Evaluation", "_____no_output_____" ], [ "There are several evaluation techniques to see how well our model performed.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt", "_____no_output_____" ], [ "# Train and test MSE\ntrain_mse = np.mean((y_train - pred_train) ** 2)\ntest_mse = np.mean((y_test - pred_test) ** 2)\nprint (\"train_MSE: {0:.2f}, test_MSE: {1:.2f}\".format(train_mse, test_mse))", "train_MSE: 0.00, test_MSE: 0.00\n" ] ], [ [ "Besides MSE, when we only have one feature, we can visually inspect the model.", "_____no_output_____" ] ], [ [ "# Figure size\nplt.figure(figsize=(15,5))\n\n# Plot train data\nplt.subplot(1, 2, 1)\nplt.title(\"Train\")\nplt.scatter(X_train, y_train, label=\"y_train\")\nplt.plot(X_train, pred_train, color=\"red\", linewidth=1, linestyle=\"-\", label=\"lm\")\nplt.legend(loc='lower right')\n\n# Plot test data\nplt.subplot(1, 2, 2)\nplt.title(\"Test\")\nplt.scatter(X_test, y_test, label=\"y_test\")\nplt.plot(X_test, pred_test, color=\"red\", linewidth=1, linestyle=\"-\", label=\"lm\")\nplt.legend(loc='lower right')\n\n# Show plots\nplt.show()", "_____no_output_____" ] ], [ [ "# Inference", "_____no_output_____" ] ], [ [ "# Feed in your own inputs\nX_infer = np.array((0, 1, 2), dtype=np.float32)\nstandardized_X_infer = X_scaler.transform(X_infer.reshape(-1, 1))\npred_infer = (lm.predict(standardized_X_infer) * np.sqrt(y_scaler.var_)) + y_scaler.mean_\nprint (pred_infer)\ndf.head(3)", "[10.00356575 13.65348546 17.30340518]\n" ] ], [ [ "# Interpretability", "_____no_output_____" ], [ "Linear regression offers the great advantage of being highly interpretable. Each feature has a coefficient which signifies it's importance/impact on the output variable y. We can interpret our coefficient as follows: By increasing X by 1 unit, we increase y by $W$ (~3.65) units. \n\n**Note**: Since we standardized our inputs and outputs for gradient descent, we need to apply an operation to our coefficients and intercept to interpret them. See proof below.", "_____no_output_____" ] ], [ [ "# Unstandardize coefficients \ncoef = lm.coef_ * (y_scaler.scale_/X_scaler.scale_)\nintercept = lm.intercept_ * y_scaler.scale_ + y_scaler.mean_ - np.sum(coef*X_scaler.mean_)\nprint (coef) # ~3.65\nprint (intercept) # ~10", "[3.64992275]\n[10.00356788]\n" ] ], [ [ "### Proof for unstandardizing coefficients:\n\n", "_____no_output_____" ], [ "Note that both X and y were standardized.\n\n$\\frac{\\mathbb{E}[y] - \\hat{y}}{\\sigma_y} = W_0 + \\sum_{j=1}^{k}W_jz_j$\n\n$z_j = \\frac{x_j - \\bar{x}_j}{\\sigma_j}$\n\n$ \\hat{y}_{scaled} = \\frac{\\hat{y}_{unscaled} - \\bar{y}}{\\sigma_y} = \\hat{W_0} + \\sum_{j=1}^{k} \\hat{W}_j (\\frac{x_j - \\bar{x}_j}{\\sigma_j}) $\n\n$\\hat{y}_{unscaled} = \\hat{W}_0\\sigma_y + \\bar{y} - \\sum_{j=1}^{k} \\hat{W}_j(\\frac{\\sigma_y}{\\sigma_j})\\bar{x}_j + \\sum_{j=1}^{k}(\\frac{\\sigma_y}{\\sigma_j})x_j $\n", "_____no_output_____" ], [ "# Regularization", "_____no_output_____" ], [ "Regularization helps decrease over fitting. 
Below is L2 regularization (ridge regression). There are many forms of regularization but they all work to reduce overfitting in our models. With L2 regularization, we are penalizing the weights with large magnitudes by decaying them. Having certain weights with high magnitudes will lead to preferential bias with the inputs and we want the model to work with all the inputs and not just a select few. There are also other types of regularization like L1 (lasso regression) which is useful for creating sparse models where some feature coefficients are zeroed out, or elastic net, which combines L1 and L2 penalties. \n\n**Note**: Regularization is not just for linear regression. You can use it to regularize any model's weights including the ones we will look at in future lessons.", "_____no_output_____" ], [ "* $ J(\\theta) = \\frac{1}{2}\\sum_{i}(X_iW - y_i)^2 + \\frac{\\lambda}{2}\\sum\\sum W^2$\n* $ \\frac{\\partial{J}}{\\partial{W}} = X (\\hat{y} - y) + \\lambda W $\n* $W = W - \\alpha\\frac{\\partial{J}}{\\partial{W}}$\nwhere:\n * $\\lambda$ is the regularization coefficient", "_____no_output_____" ] ], [ [ "# Initialize the model with L2 regularization\nlm = SGDRegressor(loss=\"squared_loss\", penalty='l2', alpha=1e-2, \n max_iter=args.num_epochs)", "_____no_output_____" ], [ "# Train\nlm.fit(X=standardized_X_train, y=standardized_y_train)", "_____no_output_____" ], [ "# Predictions (unstandardize them)\npred_train = (lm.predict(standardized_X_train) * np.sqrt(y_scaler.var_)) + y_scaler.mean_\npred_test = (lm.predict(standardized_X_test) * np.sqrt(y_scaler.var_)) + y_scaler.mean_", "_____no_output_____" ], [ "# Train and test MSE\ntrain_mse = np.mean((y_train - pred_train) ** 2)\ntest_mse = np.mean((y_test - pred_test) ** 2)\nprint (\"train_MSE: {0:.2f}, test_MSE: {1:.2f}\".format(\n train_mse, test_mse))", "train_MSE: 1.09, test_MSE: 1.15\n" ] ], [ [ "Regularization didn't help much with this specific example because our data is generated from a perfect linear equation, but for realistic data, regularization can help our model generalize well.", "_____no_output_____" ] ], [ [ "# Unstandardize coefficients \ncoef = lm.coef_ * (y_scaler.scale_/X_scaler.scale_)\nintercept = lm.intercept_ * y_scaler.scale_ + y_scaler.mean_ - (coef*X_scaler.mean_)\nprint (coef) # ~3.65\nprint (intercept) # ~10", "[3.61386358]\n[11.67635438]\n" ] ], [ [ "# Categorical variables", "_____no_output_____" ], [ "In our example, the feature was a continuous variable but what if we also have features that are categorical? One option is to treat the categorical variables as one-hot encoded variables. This is very easy to do with Pandas and once you create the dummy variables, you can use the same steps as above to train your linear model.", "_____no_output_____" ] ], [ [ "# Create data with categorical features\ncat_data = pd.DataFrame(['a', 'b', 'c', 'a'], columns=['favorite_letter'])\ncat_data.head()", "_____no_output_____" ], [ "dummy_cat_data = pd.get_dummies(cat_data)\ndummy_cat_data.head()", "_____no_output_____" ] ], [ [ "Now you can concatenate this with your continuous features and train the linear model.", "_____no_output_____" ], [ "# TODO", "_____no_output_____" ], [ "- polynomial regression\n- simple example with normal equation method (sklearn.linear_model.LinearRegression) with pros and cons vs. SGD linear regression", "_____no_output_____" ] ] ]
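To connect the training steps and gradient formulas in this lesson with code, here is a minimal NumPy sketch of the same batch gradient-descent update, with the gradient averaged over samples for a stable learning rate and an optional L2 (ridge) term matching the regularized equations above. The synthetic data, learning rate, and epoch count are illustrative assumptions only, not values the lesson uses.

```python
import numpy as np

def fit_linear_gd(X, y, lr=0.1, epochs=500, lam=0.0):
    """Fit y ~ X @ W by gradient descent on the (optionally ridge-penalized) MSE."""
    n_samples, n_features = X.shape
    W = np.zeros(n_features)
    for _ in range(epochs):
        y_hat = X @ W
        grad = X.T @ (y_hat - y) / n_samples + lam * W   # dJ/dW
        W -= lr * grad
    return W

# Tiny demo on synthetic data shaped like the lesson's (y = 3.65*x + 10 plus noise)
rng = np.random.default_rng(1234)
x = rng.normal(size=100)
y = 3.65 * x + 10 + rng.normal(scale=0.1, size=100)
X = np.column_stack([np.ones_like(x), x])   # bias column + feature

print(fit_linear_gd(X, y))             # roughly [10, 3.65]
print(fit_linear_gd(X, y, lam=0.1))    # ridge pulls the weights toward zero
```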
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
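The scikit-learn note in this lesson points out that `LinearRegression` solves the fit with the normal equation rather than SGD, and the TODO list asks for a simple normal-equation example. A hedged NumPy sketch of that closed-form idea is below; it is only an illustration on synthetic data, not the lesson's own implementation.

```python
import numpy as np

# Same kind of synthetic data as in the gradient-descent sketch (assumed, for illustration)
rng = np.random.default_rng(1234)
x = rng.normal(size=100)
y = 3.65 * x + 10 + rng.normal(scale=0.1, size=100)
X = np.column_stack([np.ones_like(x), x])   # bias column + feature

# Normal equation W = (X^T X)^{-1} X^T y, solved via least squares for numerical stability
W_closed, *_ = np.linalg.lstsq(X, y, rcond=None)
print(W_closed)   # roughly [10, 3.65], with no learning rate or epochs needed
```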
c52d9261b8e7d3ae58779e588a897f86750ec479
186,068
ipynb
Jupyter Notebook
hw2/HW_02.ipynb
AdamGTaylor/DataMining_2021
1f4cfdf844f937a29b2bf4a82e0b507161c38cd8
[ "MIT" ]
null
null
null
hw2/HW_02.ipynb
AdamGTaylor/DataMining_2021
1f4cfdf844f937a29b2bf4a82e0b507161c38cd8
[ "MIT" ]
null
null
null
hw2/HW_02.ipynb
AdamGTaylor/DataMining_2021
1f4cfdf844f937a29b2bf4a82e0b507161c38cd8
[ "MIT" ]
null
null
null
135.618076
38,496
0.821748
[ [ [ "<a href=\"https://colab.research.google.com/gist/qbeer/370770dacb737a35fb06725b69a13c05/02_blank.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Unsupervised learning & clustering\n----\n### 1. Reading data\nThe worldbank_jobs_2016.tsv (can be found in the same folder with this notebook) file contains the Jobs (and other) data for the 2016 year, downloaded from The World Bank's webpage.\n\n- Look at the data in any text editor. Build up an overall sense how the data is built up and how the missing values are represented.\n- Read the file into a pandas dataframe and tell pandas the delimiter (or separator) that separates the columns and which special pattern means if a value is missing.\n- Keep only those rows, which represents countries, at the end there are some useless rows (with missing country code).\n- The data is in a long format. Convert it into a wide format, where each row is a single country (with country code) and the column names are the features i.e. the Series Codes, the values in the columns are the measured values of the 2016 [YR 2016 column]. (eg the first column is 'EG.CFT.ACCS.ZS', the second is 'EG.ELC.ACCS.ZS'. Order of the columns does not matter)! Try to use the pivot method.\n- Check that the features are in numeric format (dtypes), this will be needed for modeling!\n\n\n#### 1/a. Data loading, NaN row removal", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np \nimport pandas as pd\n\nimport sys\nimport os\n\nimport seaborn as sns", "_____no_output_____" ], [ "#tabulator separated file, which has missing values that pandas inserts as NaNs\nworldbank_data = pd.read_csv(\"02_dataset_worldbank_jobs_2016.tsv\", delimiter=\"\\t\", na_values='..')\nprint(worldbank_data.columns)\nprint(pd.unique(worldbank_data[\"Country Name\"])) #let's see if these are like actual countries\nprint(pd.unique(worldbank_data[\"Country Code\"])) # country code could have been used to remove bad rows\nworldbank_data1 = worldbank_data[:-5] #last 5 rows to be removed\nprint(~worldbank_data1.isna())\nprint(worldbank_data1.isna().sum()) #this simply tells how many nans are in the data, remaining\n\n#may become handy\nfeatures = {n : c for (n, c) in zip(worldbank_data['Series Code'],worldbank_data['Series Name'])}", "Index(['Country Name', 'Country Code', 'Series Name', 'Series Code',\n '2016 [YR2016]'],\n dtype='object')\n['Afghanistan' 'Albania' 'Algeria' 'American Samoa' 'Andorra' 'Angola'\n 'Antigua and Barbuda' 'Argentina' 'Armenia' 'Aruba' 'Australia' 'Austria'\n 'Azerbaijan' 'Bahamas, The' 'Bahrain' 'Bangladesh' 'Barbados' 'Belarus'\n 'Belgium' 'Belize' 'Benin' 'Bermuda' 'Bhutan' 'Bolivia'\n 'Bosnia and Herzegovina' 'Botswana' 'Brazil' 'British Virgin Islands'\n 'Brunei Darussalam' 'Bulgaria' 'Burkina Faso' 'Burundi' 'Cabo Verde'\n 'Cambodia' 'Cameroon' 'Canada' 'Cayman Islands'\n 'Central African Republic' 'Chad' 'Channel Islands' 'Chile' 'China'\n 'Colombia' 'Comoros' 'Congo, Dem. Rep.' 'Congo, Rep.' 'Costa Rica'\n \"Cote d'Ivoire\" 'Croatia' 'Cuba' 'Curacao' 'Cyprus' 'Czech Republic'\n 'Denmark' 'Djibouti' 'Dominica' 'Dominican Republic' 'Ecuador'\n 'Egypt, Arab Rep.' 
'El Salvador' 'Equatorial Guinea' 'Eritrea' 'Estonia'\n 'Eswatini' 'Ethiopia' 'Faroe Islands' 'Fiji' 'Finland' 'France'\n 'French Polynesia' 'Gabon' 'Gambia, The' 'Georgia' 'Germany' 'Ghana'\n 'Gibraltar' 'Greece' 'Greenland' 'Grenada' 'Guam' 'Guatemala' 'Guinea'\n 'Guinea-Bissau' 'Guyana' 'Haiti' 'Honduras' 'Hong Kong SAR, China'\n 'Hungary' 'Iceland' 'India' 'Indonesia' 'Iran, Islamic Rep.' 'Iraq'\n 'Ireland' 'Isle of Man' 'Israel' 'Italy' 'Jamaica' 'Japan' 'Jordan'\n 'Kazakhstan' 'Kenya' 'Kiribati' 'Korea, Dem. People’s Rep.' 'Korea, Rep.'\n 'Kosovo' 'Kuwait' 'Kyrgyz Republic' 'Lao PDR' 'Latvia' 'Lebanon'\n 'Lesotho' 'Liberia' 'Libya' 'Liechtenstein' 'Lithuania' 'Luxembourg'\n 'Macao SAR, China' 'Macedonia, FYR' 'Madagascar' 'Malawi' 'Malaysia'\n 'Maldives' 'Mali' 'Malta' 'Marshall Islands' 'Mauritania' 'Mauritius'\n 'Mexico' 'Micronesia, Fed. Sts.' 'Moldova' 'Monaco' 'Mongolia'\n 'Montenegro' 'Morocco' 'Mozambique' 'Myanmar' 'Namibia' 'Nauru' 'Nepal'\n 'Netherlands' 'New Caledonia' 'New Zealand' 'Nicaragua' 'Niger' 'Nigeria'\n 'Northern Mariana Islands' 'Norway' 'Oman' 'Pakistan' 'Palau' 'Panama'\n 'Papua New Guinea' 'Paraguay' 'Peru' 'Philippines' 'Poland' 'Portugal'\n 'Puerto Rico' 'Qatar' 'Romania' 'Russian Federation' 'Rwanda' 'Samoa'\n 'San Marino' 'Sao Tome and Principe' 'Saudi Arabia' 'Senegal' 'Serbia'\n 'Seychelles' 'Sierra Leone' 'Singapore' 'Sint Maarten (Dutch part)'\n 'Slovak Republic' 'Slovenia' 'Solomon Islands' 'Somalia' 'South Africa'\n 'South Sudan' 'Spain' 'Sri Lanka' 'St. Kitts and Nevis' 'St. Lucia'\n 'St. Martin (French part)' 'St. Vincent and the Grenadines' 'Sudan'\n 'Suriname' 'Sweden' 'Switzerland' 'Syrian Arab Republic' 'Tajikistan'\n 'Tanzania' 'Thailand' 'Timor-Leste' 'Togo' 'Tonga' 'Trinidad and Tobago'\n 'Tunisia' 'Turkey' 'Turkmenistan' 'Turks and Caicos Islands' 'Tuvalu'\n 'Uganda' 'Ukraine' 'United Arab Emirates' 'United Kingdom'\n 'United States' 'Uruguay' 'Uzbekistan' 'Vanuatu' 'Venezuela, RB'\n 'Vietnam' 'Virgin Islands (U.S.)' 'West Bank and Gaza' 'Yemen, Rep.'\n 'Zambia' 'Zimbabwe' nan 'Data from database: Jobs'\n 'Last Updated: 02/17/2021']\n['AFG' 'ALB' 'DZA' 'ASM' 'AND' 'AGO' 'ATG' 'ARG' 'ARM' 'ABW' 'AUS' 'AUT'\n 'AZE' 'BHS' 'BHR' 'BGD' 'BRB' 'BLR' 'BEL' 'BLZ' 'BEN' 'BMU' 'BTN' 'BOL'\n 'BIH' 'BWA' 'BRA' 'VGB' 'BRN' 'BGR' 'BFA' 'BDI' 'CPV' 'KHM' 'CMR' 'CAN'\n 'CYM' 'CAF' 'TCD' 'CHI' 'CHL' 'CHN' 'COL' 'COM' 'COD' 'COG' 'CRI' 'CIV'\n 'HRV' 'CUB' 'CUW' 'CYP' 'CZE' 'DNK' 'DJI' 'DMA' 'DOM' 'ECU' 'EGY' 'SLV'\n 'GNQ' 'ERI' 'EST' 'SWZ' 'ETH' 'FRO' 'FJI' 'FIN' 'FRA' 'PYF' 'GAB' 'GMB'\n 'GEO' 'DEU' 'GHA' 'GIB' 'GRC' 'GRL' 'GRD' 'GUM' 'GTM' 'GIN' 'GNB' 'GUY'\n 'HTI' 'HND' 'HKG' 'HUN' 'ISL' 'IND' 'IDN' 'IRN' 'IRQ' 'IRL' 'IMN' 'ISR'\n 'ITA' 'JAM' 'JPN' 'JOR' 'KAZ' 'KEN' 'KIR' 'PRK' 'KOR' 'XKX' 'KWT' 'KGZ'\n 'LAO' 'LVA' 'LBN' 'LSO' 'LBR' 'LBY' 'LIE' 'LTU' 'LUX' 'MAC' 'MKD' 'MDG'\n 'MWI' 'MYS' 'MDV' 'MLI' 'MLT' 'MHL' 'MRT' 'MUS' 'MEX' 'FSM' 'MDA' 'MCO'\n 'MNG' 'MNE' 'MAR' 'MOZ' 'MMR' 'NAM' 'NRU' 'NPL' 'NLD' 'NCL' 'NZL' 'NIC'\n 'NER' 'NGA' 'MNP' 'NOR' 'OMN' 'PAK' 'PLW' 'PAN' 'PNG' 'PRY' 'PER' 'PHL'\n 'POL' 'PRT' 'PRI' 'QAT' 'ROU' 'RUS' 'RWA' 'WSM' 'SMR' 'STP' 'SAU' 'SEN'\n 'SRB' 'SYC' 'SLE' 'SGP' 'SXM' 'SVK' 'SVN' 'SLB' 'SOM' 'ZAF' 'SSD' 'ESP'\n 'LKA' 'KNA' 'LCA' 'MAF' 'VCT' 'SDN' 'SUR' 'SWE' 'CHE' 'SYR' 'TJK' 'TZA'\n 'THA' 'TLS' 'TGO' 'TON' 'TTO' 'TUN' 'TUR' 'TKM' 'TCA' 'TUV' 'UGA' 'UKR'\n 'ARE' 'GBR' 'USA' 'URY' 'UZB' 'VUT' 'VEN' 'VNM' 'VIR' 'PSE' 'YEM' 'ZMB'\n 'ZWE' nan]\n Country Name Country Code Series Name Series Code 2016 [YR2016]\n0 True True True True 
True\n1 True True True True True\n2 True True True True True\n3 True True True True True\n4 True True True True True\n... ... ... ... ... ...\n36017 True True True True True\n36018 True True True True True\n36019 True True True True True\n36020 True True True True True\n36021 True True True True True\n\n[36022 rows x 5 columns]\nCountry Name 0\nCountry Code 0\nSeries Name 0\nSeries Code 0\n2016 [YR2016] 12908\ndtype: int64\n" ] ], [ [ "So it seems like that only the end has that couple rows that is unneeded. With the country code, that can be removed. The last 5 rows had to be removed. Due to only these rows being not data rows, they could be removed by just simply counting them.\n\nAfter a quick revision, I see something like \"..\" in data which are NaNs actually -> fixed it, pandas.read_csv can handle predefined\n\n#### 1/b. Long to wide dataformat", "_____no_output_____" ] ], [ [ "worldbank_data_wide = worldbank_data1.pivot(index=\"Country Name\", columns=\"Series Code\", values = \"2016 [YR2016]\") #my actual first successful pivot... i can feel its strength!\ndisplay(worldbank_data_wide) #display as intelligent print", "_____no_output_____" ] ], [ [ "#### 1/c. Numeric Dataformat\n\nDon't really know whats going on here, but I am going to look into the value formats", "_____no_output_____" ] ], [ [ "type(worldbank_data_wide.loc[\"Hungary\"][0]) == np.float64", "_____no_output_____" ] ], [ [ "Okey, let's go through the whole data...", "_____no_output_____" ] ], [ [ "array_copy = np.array(worldbank_data_wide).T #copy and transpose\narray_f_str_count = np.zeros(array_copy.shape[0])\nfor i,j in enumerate(array_copy): #i am too used to for i in range(x_min,x_max): for j in range(y_min,y_max):\n array_f_str_count[i] = sum(1 for value in j if type(value)==str)\n \ndisplay(array_f_str_count) #so they seem like they are in the correct format", "_____no_output_____" ] ], [ [ "I don't think I need a conversion to numeric, but I will do it just get to know it.", "_____no_output_____" ] ], [ [ "for column in worldbank_data_wide.columns:\n worldbank_data_wide[column] = pd.to_numeric(worldbank_data_wide[column])", "_____no_output_____" ] ], [ [ "<font size=\"4\"> My guess is that the many \"..\" are messing with datatypes and I added them as a NaN format to read_csv to load it as NaN, which is in float64, which is numeric. I really like how pleasently smart and easy it is.</font>", "_____no_output_____" ], [ "-----\n### 2. Data preprocessing and inspection\n- Visualize the missing values!\n- Keep only those countries which has less than 60 missing features in the original table.\n- After this drop all features which have missing values for the remaining countries. (Imputation would also work but may introduce a bias because there is less data for less developed countries generally.)\n- How many counties and features do we have left?\n- Read the kept features' descriptions. In the original table the Series Name describe the meaning of the features. What do you think, based only on these information, which counties are the most similar to Hungary? And Greece?\n\n#### 2/a. Visualizing missing data", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(166/15, 217/15))\nim = plt.imshow(worldbank_data_wide.isna(), aspect='auto')\nplt.show()", "_____no_output_____" ] ], [ [ "Okey, seems like there is a lot of missing data present. ONTO THE SORTING!\n#### 2/b. 
Dropping rows with more than 60 missing values\npandas dropna helps me out here!", "_____no_output_____" ] ], [ [ "worldbank_data_wide_c = worldbank_data_wide.dropna(axis='rows', thresh=(166-60))\nworldbank_data_wide_c\n\nplt.figure(figsize=(166/15, 217/15))\nplt.title(\"Countries with less than 60 missing values\", fontsize=24)\nim = plt.imshow(worldbank_data_wide_c.isna(), aspect='auto')\nplt.show()", "_____no_output_____" ] ], [ [ "#### 2/c. Dropping features", "_____no_output_____" ] ], [ [ "worldbank_data_wide_cnf = worldbank_data_wide_c.dropna(axis=\"columns\", thresh=110) #where to draw the threshhold is a mystery\nplt.figure(figsize=(166/15, 217/15))\nplt.title(\"Filtered countries and features\", fontsize=24)\nim = plt.imshow(worldbank_data_wide_cnf.isna(), aspect='auto')\nplt.show()", "_____no_output_____" ] ], [ [ "So, it looks mostly clean. I had to try different threshholds to see which one is good to remove most of the NaNs but won't remove all of them.\n\n`NOTE`: yes, fill NaNs with something - the mean of each column. NaN values stop PCA due to the no comparison, hence filling in is required. Means involve bias, but I removed most of the means, so minimal bias is inserted.", "_____no_output_____" ] ], [ [ "worldbank_data_wide_cnf1 = worldbank_data_wide_cnf.fillna(worldbank_data_wide_cnf.mean())", "_____no_output_____" ] ], [ [ "#### 2/d. Closest country to Hungary and Greece.\n\nMy initial thought is to measure the Hamming distance with each country, but that not just seems to be way obvious and some stuff doesn't doesnt scale in a linear way.", "_____no_output_____" ] ], [ [ "#display(worldbank_data_wide_cnf.loc['Hungary'])\n#display(worldbank_data_wide_cnf.loc['Greece'])\n#display(worldbank_data_wide_cnf.index[0])\n\ndist1 = np.zeros(len(worldbank_data_wide_cnf.index))\n\n#go through data\n\"\"\"\nfor i in worldbank_data_wide_cnf.index:\n #country name is i\n partial_sum = 0\n for j in range(0,len(worldbank_data_wide_cnf.loc[country_name])):\n if country_name != \"Hungary\":\n partial_sum += np.abs(worldbank_data_wide_cnf.loc[i][j] - worldbank_data_wide_cnf.loc[\"Hungary\"][j]) / worldbank_data_wide_cnf[worldbank_data_wide_cnf.columns[j]]\n #calculate distance and then normalize it\n print(partial_sum)\n\"\"\"", "_____no_output_____" ], [ "country1 = \"Hungary\"\ncountry2 = \"Greece\"\n\n#for j in range(0,len(worldbank_data_wide_cnf.loc[country_name])):\n #partial_sum += np.abs(worldbank_data_wide_cnf.loc[country1][j] - worldbank_data_wide_cnf.loc[country2][j]) / np.max(np.abs(worldbank_data_wide_cnf[worldbank_data_wide_cnf.columns[j]][j]))\n#print((worldbank_data_wide_cnf.loc[country1][0] - worldbank_data_wide_cnf.loc[country2][0]) / np.max(np.abs(worldbank_data_wide_cnf[worldbank_data_wide_cnf.columns[j]][j])))\nfor j in range(0,len(worldbank_data_wide_cnf1.loc[country_name])):\n print(np.max(np.abs(worldbank_data_wide_cnf1[worldbank_data_wide_cnf1.columns[j]][j])))\nprint(np.max(np.abs(worldbank_data_wide_cnf1[worldbank_data_wide_cnf1.columns[j]][j])))", 
"0.0758945950132622\n147128144.977279\n1.02947518020851\n352639302.322353\n100.0\n3.1515052783671598\n116.05858871334499\n132.387827722937\n0.34637693\n81.27358619311227\n45.2795104427428\n7.0\n0.0\n0.0\n4.0\n225.0\n90.0\n65.0\n152.0\n9.6\n11058.0\n125.83434831467301\n0.40612057008436897\n0.0374853301231828\n50.3228178676892\n8.61742357093553\n18.583480764197898\n3.85380920890056\n13.627992453460902\n39.6708289038252\n9093538889.298521\n3.3\n6.6714829466281405\n5601275837.35185\n4.7676748300516705\n12.8417779873136\n13262493197.5264\n7.83319602002661\n33.815689133586396\n1.9640771294674702\n14804.280286408853\n14120.2116371665\n0.3\n38.4370002746582\n29.2590007781982\n3.89400005340576\n1.30400002002716\n0.912000000476837\n1.4529999494552601\n6.90899991989136\n8.72000026702881\n56.801998138427706\n56.310001373290994\n12.864000320434599\n73.1989974975586\n3751523.0\n45.0229988098145\n84.4119987487793\n65.22399842739101\n70.8870010375977\n55.021999359130895\n90.9980010986328\n0.39599999785423295\n0.0590000003576279\n13.2440004348755\n23380.791015625\n25.1770000457764\n28.4039993286133\n17.3500003814697\n84.588996887207\n52.354000091552706\n70.8830032348633\n13.895999908447301\n77.2269973754883\n65.443000793457\n27719344.0\n37.523998260498\n12.788999557495101\n1.6599999666213998\n8.39400005340576\n5.3289999961853\n27.7819995880127\n11.392000000000001\n84.7\n80.866\n1.5330000000000001\n10279811.0\n44.271982953391294\n21613466.0\n72.4642085675011\n454428.0\n18.9385646610552\n76.1267189789361\n0.0687231379365168\n127540423.0\n2041983.0\n31.703000000000003\n411597.0\n61.36\n2.8342886749000002\n425.0412076\n1.47022422929957\n86.56082090000001\n17.777874663850103\n17.777874663850103\n" ], [ "#worldbank_data_wide_cnf[worldbank_data_wide_cnf.columns[9]][9]", "_____no_output_____" ] ], [ [ "Okey, I can't see it through, but I feel that I am inches close.", "_____no_output_____" ], [ "------\n### 3. PCA\n- Perform PCA with 3 principal components on the filtered, imputed data (from now on, data refers to the filtered, imputed dataset)\n- Plot the three embedded 2D combination next to each other (0 vs 1, 0 vs 2 and 1 vs 2)\n- It seems that the embedding is really dominated by a single direction. Normalize the data (each feature should have zero mean and unit variance after normalization) and re-do the PCA and the plotting (do not delete the previous plots, just make new ones).\n- Give some explaination for the second principal component: Look at the coefficients of the features which were use the calculate that principal component. 
For the features with the largest coefficient (in absolute value) look up the Series Name for the Code.\n", "_____no_output_____" ] ], [ [ "#stuff from sklearn, have to get to know it better\nfrom sklearn import decomposition\nfrom sklearn.manifold import TSNE\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.preprocessing import StandardScaler\n\n#as i was normalazing earlier the sums, it good to have a function that normalazes input data\ndef norm_data(x):\n #I DONT LIKE BURNING\n x = pd.DataFrame(StandardScaler().fit_transform(x), index=worldbank_data_wide_cnf1.index, columns=worldbank_data_wide_cnf1.columns)\n return x\n\n#lets do a pca\ndef PCA(data):\n #do pca -> return data\n pca = decomposition.PCA(n_components=3) #burning in 3d\n #TypeError: float() argument must be a string or a number, not 'method' - some idiot with xxx.mean without ()\n pca.fit(data)\n data_pca = pca.transform(data)\n \n #and to see for impactful the PCA features\n ex_var = np.var(data_pca, axis=0)\n ex_var_ratio = ex_var/np.sum(ex_var)\n \n return pca, data_pca, ex_var_ratio\n\ndef get_impactful_features(pca, features,f_num):\n \n pca_basis = pca.components_\n \n pc_f_idx = []\n pc_f = []\n for pc in range(pca_basis.shape[0]):\n pc_current = pca_basis[pc]\n pc_n_f_idx = []\n pc_n_f = []\n for i in range(f_num):\n f_idx = np.where(np.abs(pc_current) == sorted(np.abs(pc_current))[::-1][i])[0][0]\n pc_n_f_idx.append(f_idx)\n pc_n_f.append(list(features.keys())[f_idx])\n pc_f_idx.append(pc_n_f_idx)\n pc_f.append(pc_n_f)\n\n return pc_f_idx, pc_f\n\n#great help https://towardsdatascience.com/pca-clearly-explained-how-when-why-to-use-it-and-feature-importance-a-guide-in-python-7c274582c37e", "_____no_output_____" ] ], [ [ "#### 3/a. Unscaled (No normalization)", "_____no_output_____" ] ], [ [ "pca, data_pca, ex_var_ration = PCA(data=worldbank_data_wide_cnf1)\nprint(\"Ration [{:.3f} {:.3f} {:.3f} ]\\t Weight of the three features: {:.3f}\".format(*ex_var_ration, sum(ex_var_ration))) #YES WE HAVE THEEEEM\n\nfig, ax = plt.subplots(1,3, figsize=(15,5))\nax[0].scatter(data_pca[:,0], data_pca[:,1])\nax[1].scatter(data_pca[:,1], data_pca[:,2])\nax[2].scatter(data_pca[:,0], data_pca[:,2])\n\nax[0].set_xlabel(\"First PC\", fontsize=19)\nax[1].set_xlabel(\"Second PC\", fontsize=19)\nax[2].set_xlabel(\"First PC\", fontsize=19)\n\nax[0].set_ylabel(\"Second PC\", fontsize=19)\nax[1].set_ylabel(\"Third PC\", fontsize=19)\nax[2].set_ylabel(\"Third PC\", fontsize=19)\n\nfor i in range(3):\n ax[i].grid(True)\n\nfig.tight_layout()\nplt.show()", "Ration [0.917 0.080 0.003 ]\t Weight of the three features: 1.000\n" ] ], [ [ "#### 3/b. 
Scaled (Normalization)", "_____no_output_____" ] ], [ [ "worldbank_wide_cnf1_norm = norm_data(worldbank_data_wide_cnf1)\n\npca_s, data_pca_s, ex_var_ration_s = PCA(data=worldbank_wide_cnf1_norm)\nprint(\"Ration [{:.3f} {:.3f} {:.3f} ]\\t Weight of the three features: {:.3f}\".format(*ex_var_ration_s, sum(ex_var_ration_s))) #YES WE HAVE THEEEEM\n\nfig, ax = plt.subplots(1,3, figsize=(15,5))\nax[0].scatter(data_pca_s[:,0], data_pca_s[:,1])\nax[1].scatter(data_pca_s[:,1], data_pca_s[:,2])\nax[2].scatter(data_pca_s[:,0], data_pca_s[:,2])\n\nax[0].set_xlabel(\"First PC\", fontsize=19)\nax[1].set_xlabel(\"Second PC\", fontsize=19)\nax[2].set_xlabel(\"First PC\", fontsize=19)\n\nax[0].set_ylabel(\"Second PC\", fontsize=19)\nax[1].set_ylabel(\"Third PC\", fontsize=19)\nax[2].set_ylabel(\"Third PC\", fontsize=19)\n\nfor i in range(3):\n ax[i].grid(True)\n\nfig.tight_layout()\nplt.show()", "Ration [0.655 0.208 0.137 ]\t Weight of the three features: 1.000\n" ] ], [ [ "OKEY! So a scaling could be very meaningful! And shows how impactful some features really are, due to some overshadowing. Let's see why this change exists! I moved the dict with upper, but i still have to get what it is...\n", "_____no_output_____" ] ], [ [ "pc_f_idx, pc_f = get_impactful_features(pca, features, 5)\npc_f = np.array(pc_f)\n\nk = list(features.keys())\nv = list(features.values())\n\nfor i in range(pc_f.shape[1]):\n print('{} : {}'.format(i,v[k.index(str(pc_f[1,i]))]))", "0 : Employment in industry, male (% of male employment) (modeled ILO estimate)\n1 : Employment in services, male (% of male employment) (modeled ILO estimate)\n2 : Employment in agriculture, male (% of male employment) (modeled ILO estimate)\n3 : Adolescent fertility rate (births per 1,000 women ages 15-19)\n4 : Agriculture, value added (% of GDP)\n" ] ], [ [ "So the second PC is related to employment rates... Not suprise that this is the second PC\n\n<font size=\"4\"> This is where I stop. Due to illness, I am much slower... </font>", "_____no_output_____" ], [ "-----\n### 4. T-SNE\n- Perform T-SNE on the scaled data with 2 components\n- Plot the embeddings results. Add a text label for each point to make it possible to interpret the results. It will not be possible to read all, but try to make it useful, see the attached image as an example!\n- Highlight Hungary, Greece, Norway, China, Russia (HUN, GRC, NOR, CHN, RUS)! Which countries are the closest one to Hungary and Greece?\n", "_____no_output_____" ], [ "-------\n### 5. Hierarchical and K-Means clustering\n- Perform hierarchical clustering on the filtered and scaled data (hint: use seaborn)\n- Try to plot in a way that all country's name is visible\n- Perform K-Means clustering on the filtered and scaled data with 4 clusters.\n- Make a plot with text label for each point as in the previous excersice but use different color for every cluster.\n- Write down your impressions that you got from these two plots! 
Which cluster are China and Hungary in?\n", "_____no_output_____" ], [ "----\n### Hints:\n- In total you can get 10 points for fully completing all tasks.\n- Decorate your notebook with questions, explanations, etc., and make it self-contained and understandable!\n- Comment your code when necessary!\n- Write functions for repetitive tasks!\n- Use the pandas package for data loading and handling\n- Use matplotlib and seaborn for plotting, or bokeh and plotly for interactive investigation\n- Use the scikit-learn package for almost everything\n- Use for loops only if it is really necessary!\n- Code sharing is not allowed between students! Sharing code will result in zero points.\n- If you use code found on the web, it is OK, but make its source clear!", "_____no_output_____" ] ] ]
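Tasks 4 and 5 (t-SNE, hierarchical clustering, and K-Means) are described above but left unimplemented in this notebook. The sketch below shows one possible way to attempt them; it assumes the scaled frame `worldbank_wide_cnf1_norm` built earlier is available, and the perplexity, figure sizes, and cluster count are illustrative choices, not required settings.

```python
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import seaborn as sns

X_scaled = worldbank_wide_cnf1_norm          # standardized data from the earlier cells
countries = X_scaled.index

# Task 4: 2-component t-SNE embedding with country labels
emb = TSNE(n_components=2, perplexity=20, random_state=1234).fit_transform(X_scaled)
fig, ax = plt.subplots(figsize=(12, 10))
ax.scatter(emb[:, 0], emb[:, 1], s=10, color='gray')
for (px, py), name in zip(emb, countries):
    ax.annotate(name, (px, py), fontsize=6)
for name in ['Hungary', 'Greece', 'Norway', 'China', 'Russian Federation']:
    if name in countries:
        i = countries.get_loc(name)
        ax.scatter(emb[i, 0], emb[i, 1], color='red', s=40, zorder=3)

# Task 5a: hierarchical clustering of countries and features (seaborn clustermap)
sns.clustermap(X_scaled, figsize=(12, 30), yticklabels=True)

# Task 5b: K-Means with 4 clusters, colored on the t-SNE embedding
labels = KMeans(n_clusters=4, random_state=1234).fit_predict(X_scaled)
fig, ax = plt.subplots(figsize=(12, 10))
ax.scatter(emb[:, 0], emb[:, 1], c=labels, cmap='tab10', s=10)
for (px, py), name in zip(emb, countries):
    ax.annotate(name, (px, py), fontsize=6)
plt.show()
```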
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
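Section 2/d above (which countries are closest to Hungary and Greece) was also left unfinished. One hedged way to complete it is a plain Euclidean distance computed on the standardized features, sketched below; the helper name and the choice of metric are assumptions made for illustration, not the assignment's required approach.

```python
import numpy as np
import pandas as pd

def closest_countries(df_scaled: pd.DataFrame, target: str, n: int = 5) -> pd.Series:
    """Euclidean distance from `target` to every other row of a standardized
    country-by-feature table; the smallest distances are the most similar countries."""
    diffs = df_scaled - df_scaled.loc[target]
    dist = np.sqrt((diffs ** 2).sum(axis=1))
    return dist.drop(target).nsmallest(n)

# Example usage with the scaled frame built in the notebook above (assumed available)
# print(closest_countries(worldbank_wide_cnf1_norm, 'Hungary'))
# print(closest_countries(worldbank_wide_cnf1_norm, 'Greece'))
```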
c52d97a26824e6d5f640f550e557b4024c12b419
727
ipynb
Jupyter Notebook
Question23.ipynb
hackpert/CBSECSPracticals
4ea791c93cf6efdbad8757fda57f324f59abe83b
[ "MIT" ]
null
null
null
Question23.ipynb
hackpert/CBSECSPracticals
4ea791c93cf6efdbad8757fda57f324f59abe83b
[ "MIT" ]
null
null
null
Question23.ipynb
hackpert/CBSECSPracticals
4ea791c93cf6efdbad8757fda57f324f59abe83b
[ "MIT" ]
null
null
null
20.194444
88
0.508941
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
c52da587a102646c9611af9b7e39274ed87e2e11
481,232
ipynb
Jupyter Notebook
ICCT_en/examples/03/FD-20_1DoF_Mass-spring-damper.ipynb
ICCTerasmus/ICCT
fcd56ab6b5fddc00f72521cc87accfdbec6068f6
[ "BSD-3-Clause" ]
6
2021-05-22T18:42:14.000Z
2021-10-03T14:10:22.000Z
ICCT_en/examples/03/FD-20_1DoF_Mass-spring-damper.ipynb
ICCTerasmus/ICCT
fcd56ab6b5fddc00f72521cc87accfdbec6068f6
[ "BSD-3-Clause" ]
null
null
null
ICCT_en/examples/03/FD-20_1DoF_Mass-spring-damper.ipynb
ICCTerasmus/ICCT
fcd56ab6b5fddc00f72521cc87accfdbec6068f6
[ "BSD-3-Clause" ]
2
2021-05-24T11:40:09.000Z
2021-08-29T16:36:18.000Z
154.687239
137,825
0.83157
[ [ [ "%matplotlib notebook\nimport control as c\nimport ipywidgets as w\nimport numpy as np\n\nfrom IPython.display import display, HTML\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport matplotlib.animation as animation\n\ndisplay(HTML('<script> $(document).ready(function() { $(\"div.input\").hide(); }); </script>'))", "_____no_output_____" ] ], [ [ "## Control design for a 1DoF mass-spring-damper system\n\nThe following example is a control design task for a mass-spring-damper system, a typical second-order model. The structure consists of a sliding mass (friction is ignored), connected to a reference point with an infinitely expandable string-damper pair.<br><br>\n<img src=\"Images/mbk.png\" width=\"40%\" />\n<br>\n\nIts equation of motion can be stated as:\n<br>\n$$m\\cdot\\ddot{x}+b\\cdot\\dot{x}+k\\cdot{x}=F$$\n<br>\nAfter the Laplace transformation of the differential equation, the transfer function can be expressed as:\n<br>\n$$G(s)=\\frac{1}{m\\cdot s^2 +b\\cdot s + k}$$\n<br>\nYour task is to choose a controller type, and tune it to acceptable levels of performance!\n\n<b>First, choose a system model!</b><br>\nToggle between different realistic models with randomly preselected values (buttons *Model 1* - *Model 6*). By clicking the *Preset* button default, valid predetermined controller parameters are set and cannot be tuned further.", "_____no_output_____" ] ], [ [ "# Figure definition\n\nfig1, ((f1_ax1), (f1_ax2)) = plt.subplots(2, 1)\nfig1.set_size_inches((9.8, 5))\nfig1.set_tight_layout(True)\n\nf1_line1, = f1_ax1.plot([], [])\nf1_line2, = f1_ax2.plot([], []) \n\nf1_ax1.grid(which='both', axis='both', color='lightgray')\nf1_ax2.grid(which='both', axis='both', color='lightgray')\n\nf1_ax1.autoscale(enable=True, axis='both', tight=True)\nf1_ax2.autoscale(enable=True, axis='both', tight=True)\n\nf1_ax1.set_title('Bode magnitude plot', fontsize=11)\nf1_ax1.set_xscale('log')\nf1_ax1.set_xlabel(r'$f\\/$[Hz]', labelpad=0, fontsize=10)\nf1_ax1.set_ylabel(r'$A\\/$[dB]', labelpad=0, fontsize=10)\nf1_ax1.tick_params(axis='both', which='both', pad=0, labelsize=8)\n\nf1_ax2.set_title('Bode phase plot', fontsize=11)\nf1_ax2.set_xscale('log')\nf1_ax2.set_xlabel(r'$f\\/$[Hz]', labelpad=0, fontsize=10)\nf1_ax2.set_ylabel(r'$\\phi\\/$[°]', labelpad=0, fontsize=10)\nf1_ax2.tick_params(axis='both', which='both', pad=0, labelsize=8)\n\n# System parameters\n\ndef build_base_model(m, k, b):\n \n W_sys = c.tf([1], [m, b, k])\n \n print('System transfer function:')\n print(W_sys)\n \n # System analysis\n \n poles = c.pole(W_sys) # Poles\n \n print('System poles:\\n')\n print(poles)\n \n global f1_line1, f1_line2\n \n f1_ax1.lines.remove(f1_line1)\n f1_ax2.lines.remove(f1_line2)\n \n mag, phase, omega = c.bode_plot(W_sys, Plot=False) # Bode-plot\n \n f1_line1, = f1_ax1.plot(omega/2/np.pi, 20*np.log10(mag), lw=1, color='blue')\n f1_line2, = f1_ax2.plot(omega/2/np.pi, phase*180/np.pi, lw=1, color='blue') \n \n f1_ax1.relim()\n f1_ax2.relim()\n f1_ax1.autoscale_view()\n f1_ax2.autoscale_view()\n \n# GUI widgets\n\ntypeSelect = w.ToggleButtons(\n options=[('Model 1', 0), ('Model 2', 1), ('Model 3', 2), ('Model 4', 3), ('Model 5', 4), ('Model 6', 5), ('Preset', -1)],\n value =-1, description='System: ', layout=w.Layout(width='60%'))\n\nm_slider = w.FloatLogSlider(value=0.5, base=10, min=-3, max=3, description='m [kg] :', continuous_update=False,\n layout=w.Layout(width='auto', flex='5 5 auto'))\nk_slider = w.FloatLogSlider(value=100, base=10, min=-2, max=4, description='k 
[N/m] :', continuous_update=False,\n layout=w.Layout(width='auto', flex='5 5 auto'))\nb_slider = w.FloatLogSlider(value=50, base=10, min=-2, max=4, description='b [Ns/m] :', continuous_update=False,\n layout=w.Layout(width='auto', flex='5 5 auto'))\n\ninput_data = w.interactive_output(build_base_model, {'m':m_slider, 'k':k_slider, 'b':b_slider})\n\ndef update_sliders(index):\n global m_slider, k_slider, b_slider\n \n mval = [0.05, 0.1, 0.25, 0.5, 1, 5, 0.25]\n kval = [1.25, 10, 100, 10, 50, 1000, 50]\n bval = [1, 0.5, 2, 10, 10, 20, 1]\n \n m_slider.value = mval[index]\n k_slider.value = kval[index]\n b_slider.value = bval[index]\n \n if index == -1:\n m_slider.disabled = True\n k_slider.disabled = True\n b_slider.disabled = True\n else:\n m_slider.disabled = False\n k_slider.disabled = False\n b_slider.disabled = False\n \ninput_data2 = w.interactive_output(update_sliders, {'index':typeSelect})\n\ndisplay(typeSelect, input_data2)\ndisplay(w.HBox([m_slider, k_slider, b_slider]), input_data)", "_____no_output_____" ] ], [ [ "Depending on your selection, the system is either under- or overdamped.\n<br>\n<b>Select an appropriate controller configuration! Which one is the best for your system? Why?<br>\nSet up your controller for the fastest settling time with at most 25% overshoot!</b>\n\nYou can turn on/off each of the I and D components, and if D is active, you can apply the first-order filter as well, based on the derivating time constant.", "_____no_output_____" ] ], [ [ "# PID position control\n\nfig2, ((f2_ax1, f2_ax2, f2_ax3), (f2_ax4, f2_ax5, f2_ax6)) = plt.subplots(2, 3)\nfig2.set_size_inches((9.8, 5))\nfig2.set_tight_layout(True)\n\nf2_line1, = f2_ax1.plot([], [])\nf2_line2, = f2_ax2.plot([], []) \nf2_line3, = f2_ax3.plot([], [])\nf2_line4, = f2_ax4.plot([], []) \nf2_line5, = f2_ax5.plot([], [])\nf2_line6, = f2_ax6.plot([], [])\n\nf2_ax1.grid(which='both', axis='both', color='lightgray')\nf2_ax2.grid(which='both', axis='both', color='lightgray')\nf2_ax3.grid(which='both', axis='both', color='lightgray')\nf2_ax4.grid(which='both', axis='both', color='lightgray')\nf2_ax5.grid(which='both', axis='both', color='lightgray')\nf2_ax6.grid(which='both', axis='both', color='lightgray')\n\nf2_ax1.autoscale(enable=True, axis='both', tight=True)\nf2_ax2.autoscale(enable=True, axis='both', tight=True)\nf2_ax3.autoscale(enable=True, axis='both', tight=True)\nf2_ax4.autoscale(enable=True, axis='both', tight=True)\nf2_ax5.autoscale(enable=True, axis='both', tight=True)\nf2_ax6.autoscale(enable=True, axis='both', tight=True)\n\n\nf2_ax1.set_title('Closed loop step response', fontsize=9)\nf2_ax1.set_xlabel(r'$t\\/$[s]', labelpad=0, fontsize=8)\nf2_ax1.set_ylabel(r'$x\\/$[m]', labelpad=0, fontsize=8)\nf2_ax1.tick_params(axis='both', which='both', pad=0, labelsize=6)\n\nf2_ax2.set_title('Nyquist diagram', fontsize=9)\nf2_ax2.set_xlabel(r'Re', labelpad=0, fontsize=8)\nf2_ax2.set_ylabel(r'Im', labelpad=0, fontsize=8)\nf2_ax2.tick_params(axis='both', which='both', pad=0, labelsize=6)\n\nf2_ax3.set_title('Bode magniture plot', fontsize=9)\nf2_ax3.set_xscale('log')\nf2_ax3.set_xlabel(r'$f\\/$[Hz]', labelpad=0, fontsize=8)\nf2_ax3.set_ylabel(r'$A\\/$[dB]', labelpad=0, fontsize=8)\nf2_ax3.tick_params(axis='both', which='both', pad=0, labelsize=6)\n\nf2_ax4.set_title('Closed loop impulse response', fontsize=9)\nf2_ax4.set_xlabel(r'$t\\/$[s]', labelpad=0, fontsize=8)\nf2_ax4.set_ylabel(r'$x\\/$[m]', labelpad=0, fontsize=8)\nf2_ax4.tick_params(axis='both', which='both', pad=0, 
labelsize=6)\n\nf2_ax5.set_title('Load transfer step response', fontsize=9)\nf2_ax5.set_xlabel(r'$t\\/$[s]', labelpad=0, fontsize=8)\nf2_ax5.set_ylabel(r'$x\\/$[m]', labelpad=0, fontsize=8)\nf2_ax5.tick_params(axis='both', which='both', pad=0, labelsize=6)\n\nf2_ax6.set_title('Bode phase plot', fontsize=9)\nf2_ax6.set_xscale('log')\nf2_ax6.set_xlabel(r'$f\\/$[Hz]', labelpad=0, fontsize=8)\nf2_ax6.set_ylabel(r'$\\phi\\/$[°]', labelpad=0, fontsize=8)\nf2_ax6.tick_params(axis='both', which='both', pad=0, labelsize=6)\n\ndef position_control(Kp, Ti, Td, Fd, Ti0, Td0, Fd0, m, k, b):\n \n W_sys = c.tf([1], [m, b, k])\n \n # PID Controller\n \n P = Kp # Proportional term\n I = Kp / Ti # Integral term\n D = Kp * Td # Derivative term\n Td_f = Td / Fd # Derivative term filter\n \n W_PID = c.parallel(c.tf([P], [1]),\n c.tf([I * Ti0], [1 * Ti0, 1 * (not Ti0)]),\n c.tf([D * Td0, 0], [Td_f * Td0 * Fd0, 1])) # PID controller in time constant format\n \n W_open = c.series(W_PID, W_sys) # Open loop with two integrators added for position output\n W_closed = c.feedback(W_open, 1, -1) # Closed loop with negative feedback\n \n W_load = c.feedback(W_sys, W_PID, -1) # Transfer function of the load based errors \n \n # Display\n \n global f2_line1, f2_line2, f2_line3, f2_line4, f2_line5, f2_line6\n \n f2_ax1.lines.remove(f2_line1)\n f2_ax2.lines.remove(f2_line2)\n f2_ax3.lines.remove(f2_line3)\n f2_ax4.lines.remove(f2_line4)\n f2_ax5.lines.remove(f2_line5)\n f2_ax6.lines.remove(f2_line6)\n \n tout, yout = c.step_response(W_closed)\n f2_line1, = f2_ax1.plot(tout, yout, lw=1, color='blue') \n \n _, _, ob = c.nyquist_plot(W_open, Plot=False) # Small resolution plot to determine bounds \n real, imag, freq = c.nyquist_plot(W_open, omega=np.logspace(np.log10(ob[0]), np.log10(ob[-1]), 1000), Plot=False)\n f2_line2, = f2_ax2.plot(real, imag, lw=1, color='blue')\n \n mag, phase, omega = c.bode_plot(W_open, Plot=False)\n f2_line3, = f2_ax3.plot(omega/2/np.pi, 20*np.log10(mag), lw=1, color='blue')\n f2_line6, = f2_ax6.plot(omega/2/np.pi, phase*180/np.pi, lw=1, color='blue')\n\n tout, yout = c.impulse_response(W_closed)\n f2_line4, = f2_ax4.plot(tout, yout, lw=1, color='blue') \n\n tout, yout = c.step_response(W_load)\n f2_line5, = f2_ax5.plot(tout, yout, lw=1, color='blue')\n \n f2_ax1.relim()\n f2_ax2.relim()\n f2_ax3.relim()\n f2_ax4.relim()\n f2_ax5.relim()\n f2_ax6.relim()\n f2_ax1.autoscale_view()\n f2_ax2.autoscale_view()\n f2_ax3.autoscale_view()\n f2_ax4.autoscale_view()\n f2_ax5.autoscale_view()\n f2_ax6.autoscale_view()\n \ndef update_controller(index):\n global Kp_slider, Ti_slider, Td_slider, Fd_slider, Ti_button, Td_button, Fd_button\n \n if index == -1:\n Kp_slider.value = 100\n Td_slider.value = 0.05\n Fd_slider.value = 10\n Ti_button.value = False\n Td_button.value = True\n Fd_button.value = True\n \n Kp_slider.disabled = True\n Ti_slider.disabled = True\n Td_slider.disabled = True\n Fd_slider.disabled = True\n Ti_button.disabled = True\n Td_button.disabled = True\n Fd_button.disabled = True\n else:\n Kp_slider.disabled = False\n Ti_slider.disabled = False\n Td_slider.disabled = False\n Fd_slider.disabled = False\n Ti_button.disabled = False\n Td_button.disabled = False\n Fd_button.disabled = False\n \n# GUI widgets\n\nKp_slider = w.FloatLogSlider(value=0.5, base=10, min=-1, max=4, description='Kp:', continuous_update=False,\n layout=w.Layout(width='auto', flex='5 5 auto'))\nTi_slider = w.FloatLogSlider(value=0.0035, base=10, min=-4, max=1, description='', continuous_update=False,\n 
layout=w.Layout(width='auto', flex='5 5 auto'))\nTd_slider = w.FloatLogSlider(value=1, base=10, min=-4, max=1, description='', continuous_update=False,\n layout=w.Layout(width='auto', flex='5 5 auto'))\nFd_slider = w.FloatLogSlider(value=1, base=10, min=0, max=3, description='', continuous_update=False,\n layout=w.Layout(width='auto', flex='5 5 auto'))\n\nTi_button = w.ToggleButton(value=True, description='Ti',\n layout=w.Layout(width='auto', flex='1 1 0%'))\nTd_button = w.ToggleButton(value=False, description='Td',\n layout=w.Layout(width='auto', flex='1 1 0%'))\nFd_button = w.ToggleButton(value=False, description='Fd',\n layout=w.Layout(width='auto', flex='1 1 0%'))\n\ninput_data = w.interactive_output(position_control, {'Kp': Kp_slider, 'Ti': Ti_slider, 'Td': Td_slider,\n 'Fd': Fd_slider, 'Ti0' : Ti_button, 'Td0': Td_button,\n 'Fd0': Fd_button, 'm':m_slider, 'k':k_slider, 'b':b_slider})\n\nw.interactive_output(update_controller, {'index': typeSelect})\n\ndisplay(w.HBox([Kp_slider, Ti_button, Ti_slider, Td_button, Td_slider, Fd_button, Fd_slider]), input_data)", "_____no_output_____" ] ], [ [ "In the following simulation, you can observe the movement of your system based on your controller setup. You can create reference signals and even apply some disturbance and see how the system reacts.\n\n<b>Is your configuration suitable for signal-following? Readjust your controller so that it can follow a sine wave acceptably!</b>\n<br><br>\n<i>(The animations are scaled to fit the frame through the whole simulation. Because of this, unstable solutions might not seem to move until the very last second.)</i>", "_____no_output_____" ] ], [ [ "# Simulation data\n\nanim_fig = plt.figure()\nanim_fig.set_size_inches((9.8, 6))\nanim_fig.set_tight_layout(True)\n\nanim_ax1 = anim_fig.add_subplot(211)\nanim_ax2 = anim_ax1.twinx()\n\nframe_count=1000\n\nl1 = anim_ax1.plot([], [], lw=1, color='blue')\nl2 = anim_ax1.plot([], [], lw=2, color='red')\nl3 = anim_ax2.plot([], [], lw=1, color='grey')\n\nline1 = l1[0]\nline2 = l2[0]\nline3 = l3[0]\n\nanim_ax1.legend(l1+l2+l3, ['Reference [m]', 'Output [m]', 'Load [N]'], loc=1)\n\nanim_ax1.set_title('Time response simulation', fontsize=12)\nanim_ax1.set_xlabel(r'$t\\/$[s]', labelpad=0, fontsize=10)\nanim_ax1.set_ylabel(r'$x\\/$[m]', labelpad=0, fontsize=10)\nanim_ax1.tick_params(axis='both', which='both', pad=0, labelsize=8)\nanim_ax2.set_ylabel(r'$F\\/$[N]', labelpad=0, fontsize=10)\nanim_ax2.tick_params(axis='both', which='both', pad=0, labelsize=8)\n\nanim_ax1.grid(which='both', axis='both', color='lightgray')\n\nT_plot = []\nX_plot = []\nL_plot = []\nR_plot = []\n\n# Scene data\n\nscene_ax = anim_fig.add_subplot(212)\nscene_ax.set_xlim((-3, 4))\nscene_ax.set_ylim((-0.5, 1.5))\nscene_ax.axis('off')\n\nscene_ax.plot([-2.5, -2.3, -2.3, -0.3, -2.3, -2.3, -0.3], [0.75, 0.75, 0.9, 0.9, 0.9, 0.6, 0.6], lw=2, color='blue', zorder=0)\nscene_ax.plot([-2.5, -2.3], [0.25, 0.25], lw=2, color='red', zorder=0)\nscene_ax.plot([-2.5, -2.5], [1.25, -0.25], lw=4, color='gray', zorder=2)\n\nscene_ax.text(-1.3, 1, 'b', fontsize=14, color='blue', va='bottom', zorder=5)\nscene_ax.text(-1.3, 0, 'k', fontsize=14, color='red', va='top', zorder=5)\n\nb_line, = scene_ax.plot([], [], lw=2, color='blue')\nk_line, = scene_ax.plot([], [], lw=2, color='red')\n\nm_text = scene_ax.text(1.75, 0.5, 'm', fontsize=14, color='green', va='center', ha='center', zorder=5)\nm_box = patches.Rectangle((1, 0), 1.5, 1, lw=2, color='green', fill=False, zorder=10)\nscene_ax.add_patch(m_box)\n\nx_arrow = 
scene_ax.arrow(1.75, -0.5, 0, 0.25, color='blue', head_width=0.1,\n length_includes_head=True, lw=1, fill=False, zorder=5)\nr_arrow = scene_ax.arrow(1.75, -0.5, 0, 0.25, color='red', head_width=0.1,\n length_includes_head=True, lw=1, fill=False, zorder=5)\nbase_arrow = x_arrow.xy\n\npos_var = []\nref_var = []\n\n#Simulation function\n\ndef simulation(Kp, Ti, Td, Fd, Ti0, Td0, Fd0, m, k, b, T, dt, X, Xf, Xa, Xo, L, Lf, La, Lo):\n \n # Controller\n P = Kp # Proportional term\n I = Kp / Ti # Integral term\n D = Kp * Td # Derivative term\n Td_f = Td / Fd # Derivative term filter\n \n W_PID = c.parallel(c.tf([P], [1]),\n c.tf([I * Ti0], [1 * Ti0, 1 * (not Ti0)]),\n c.tf([D * Td0, 0], [Td_f * Td0 * Fd0, 1])) # PID controller\n \n # System\n W_sys = c.tf([1], [m, b, k])\n \n # Model\n W_open = c.series(W_PID, W_sys) # Open loop with two integrators added for position output\n W_closed = c.feedback(W_open, 1, -1) # Closed loop with negative feedback\n \n W_load = c.feedback(W_sys, W_PID, -1) # Transfer function of the load based errors \n \n # Reference and disturbance signals\n\n T_sim = np.arange(0, T, dt, dtype=np.float64)\n \n if X == 0: # Constant reference\n X_sim = np.full_like(T_sim, Xa * Xo)\n elif X == 1: # Sine wave reference\n X_sim = (np.sin(2 * np.pi * Xf * T_sim) + Xo) * Xa\n elif X == 2: # Square wave reference\n X_sim = (np.sign(np.sin(2 * np.pi * Xf * T_sim)) + Xo) * Xa\n \n if L == 0: # Constant load\n L_sim = np.full_like(T_sim, La * Lo)\n elif L == 1: # Sine wave load\n L_sim = (np.sin(2 * np.pi * Lf * T_sim) + Lo) * La\n elif L == 2: # Square wave load\n L_sim = (np.sign(np.sin(2 * np.pi * Lf * T_sim)) + Lo) * La\n elif L_type.value == 3: # Noise form load\n L_sim = np.interp(T_sim, np.linspace(0, T, int(T * Lf) + 2),\n np.random.normal(loc=(Lo * La), scale=La, size=int(T * Lf) + 2))\n \n # System response\n \n Tx, youtx, xoutx = c.forced_response(W_closed, T_sim, X_sim)\n Tl, youtl, xoutl = c.forced_response(W_load, T_sim, L_sim)\n R_sim = np.nan_to_num(youtx + youtl)\n \n # Display\n \n XR_max = max(np.amax(np.absolute(np.concatenate((X_sim, R_sim)))), Xa)\n L_max = max(np.amax(np.absolute(L_sim)), La)\n \n anim_ax1.set_xlim((0, T))\n anim_ax1.set_ylim((-1.2 * XR_max, 1.2 * XR_max))\n anim_ax2.set_ylim((-1.5 * L_max, 1.5 * L_max))\n \n global T_plot, X_plot, L_plot, R_plot, pos_var, ref_var\n \n T_plot = np.linspace(0, T, frame_count, dtype=np.float32)\n X_plot = np.interp(T_plot, T_sim, X_sim)\n L_plot = np.interp(T_plot, T_sim, L_sim)\n R_plot = np.interp(T_plot, T_sim, R_sim)\n \n pos_var = R_plot/XR_max\n ref_var = X_plot/XR_max\n \ndef anim_init():\n line1.set_data([], [])\n line2.set_data([], [])\n line3.set_data([], [])\n \n b_line.set_data([], [])\n k_line.set_data([], [])\n \n x_arrow.set_xy(base_arrow)\n r_arrow.set_xy(base_arrow)\n \n m_text.set_position((1.75, 0.5))\n m_box.set_xy((1, 0))\n \n return (line1, line2, line3, m_text, m_box, b_line, k_line,)\n\ndef animate(i):\n line1.set_data(T_plot[0:i], X_plot[0:i])\n line2.set_data(T_plot[0:i], R_plot[0:i])\n line3.set_data(T_plot[0:i], L_plot[0:i])\n \n b_line.set_data([-1.3, -1.3, -1.3, 1]+pos_var[i], [0.66, 0.84, 0.75, 0.75])\n k_line.set_data(np.append(np.array([0, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 22])*(pos_var[i]+2)/20-2.3, pos_var[i]+1),\n [0.25, 0.34, 0.16, 0.34, 0.16, 0.34, 0.16, 0.34, 0.16, 0.34, 0.16, 0.34, 0.25, 0.25])\n \n x_arrow.set_xy(base_arrow+[ref_var[i], 0])\n r_arrow.set_xy(base_arrow+[pos_var[i], 0])\n \n m_text.set_position((pos_var[i]+1.75, 0.5))\n m_box.set_x(pos_var[i]+1)\n \n 
return (line1, line2, line3, m_text, m_box, b_line, k_line,)\n\nanim = animation.FuncAnimation(anim_fig, animate, init_func=anim_init,\n frames=frame_count, interval=10, blit=True,\n repeat=True)\n\n# Controllers\n\nT_slider = w.FloatLogSlider(value=10, base=10, min=-0.7, max=1, step=0.01,\n description='Duration [s]:', continuous_update=False,\n orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))\n\ndt_slider = w.FloatLogSlider(value=0.1, base=10, min=-3, max=-1, step=0.01,\n description='Timestep [s]:', continuous_update=False,\n orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))\n \nX_type = w.Dropdown(options=[('Constant', 0), ('Sine', 1), ('Square', 2)], value=1,\n description='Reference: ', continuous_update=False, layout=w.Layout(width='auto', flex='3 3 auto')) \nXf_slider = w.FloatLogSlider(value=0.5, base=10, min=-2, max=2, step=0.01,\n description='Frequency [Hz]:', continuous_update=False,\n orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))\nXa_slider = w.FloatLogSlider(value=1, base=10, min=-2, max=2, step=0.01,\n description='Amplitude [m]:', continuous_update=False,\n orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))\nXo_slider = w.FloatSlider(value=0, min=-10, max=10, description='Offset/Ampl:', continuous_update=False,\n orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto')) \n \nL_type = w.Dropdown(options=[('Constant', 0), ('Sine', 1), ('Square', 2), ('Noise', 3)], value=2,\n description='Load: ', continuous_update=False, layout=w.Layout(width='auto', flex='3 3 auto')) \nLf_slider = w.FloatLogSlider(value=1, base=10, min=-2, max=2, step=0.01,\n description='Frequency [Hz]:', continuous_update=False,\n orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))\nLa_slider = w.FloatLogSlider(value=0.1, base=10, min=-2, max=2, step=0.01,\n description='Amplitude [N]:', continuous_update=False,\n orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))\nLo_slider = w.FloatSlider(value=0, min=-10, max=10, description='Offset/Ampl:', continuous_update=False,\n orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))\n\ninput_data = w.interactive_output(simulation, {'Kp': Kp_slider, 'Ti': Ti_slider, 'Td': Td_slider,\n 'Fd': Fd_slider, 'Ti0' : Ti_button, 'Td0': Td_button,\n 'Fd0': Fd_button,\n 'm':m_slider, 'k':k_slider, 'b':b_slider,\n 'T': T_slider, 'dt': dt_slider,\n 'X': X_type, 'Xf': Xf_slider, 'Xa': Xa_slider, 'Xo': Xo_slider,\n 'L': L_type, 'Lf': Lf_slider, 'La': La_slider, 'Lo': Lo_slider})\n\ndisplay(w.HBox([w.HBox([T_slider, dt_slider], layout=w.Layout(width='25%')),\n w.Box([], layout=w.Layout(width='5%')),\n w.VBox([X_type, w.HBox([Xf_slider, Xa_slider, Xo_slider])], layout=w.Layout(width='30%')),\n w.Box([], layout=w.Layout(width='5%')),\n w.VBox([L_type, w.HBox([Lf_slider, La_slider, Lo_slider])], layout=w.Layout(width='30%'))],\n layout=w.Layout(width='100%', justify_content='center')), input_data)\n", "_____no_output_____" ] ], [ [ "The duration parameter controls the simulated timeframe and does not affect the runtime of the animation. In contrast, the timestep controls the model sampling and can refine the results in exchange for higher computational resources.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
c52dbaf12cc37e179ba2f816ab3e0acaebe36a53
109,857
ipynb
Jupyter Notebook
Untitled.ipynb
anskipper/jupyter-notebooks
3f87618673fe8caaf313267a19ca869bc9bec98c
[ "MIT" ]
null
null
null
Untitled.ipynb
anskipper/jupyter-notebooks
3f87618673fe8caaf313267a19ca869bc9bec98c
[ "MIT" ]
null
null
null
Untitled.ipynb
anskipper/jupyter-notebooks
3f87618673fe8caaf313267a19ca869bc9bec98c
[ "MIT" ]
null
null
null
362.564356
91,092
0.924556
[ [ [ "import pickle\nimport datetime as dt\nimport matplotlib.pyplot as plt\nfrom flowmeterAnalysis import readFiles", "_____no_output_____" ], [ "homeDir = 'P:\\\\PW-WATER SERVICES\\\\TECHNICAL SERVICES\\\\Anna'\npickleLocation = homeDir + '\\\\2018\\\\Python Objects\\\\'\n\nwith open(pickleLocation + 'flowDict.pickle', 'rb') as handle:\n flowDict = pickle.load(handle)", "_____no_output_____" ], [ "fmname = 'BC32'\n#[dt.datetime(2018,1,1):dt.datetime(2018,2,1)]\nQ = readFiles.reorganizeByTime(\n df = flowDict[fmname],\n colVal = 'Q (MGD)')\n", "_____no_output_____" ], [ "import matplotlib\n\nfont = {'family' : 'DejaVu Sans',\n 'weight' : 'normal',\n 'size' : 11}\n\nmatplotlib.rc('font', **font)\ncolors = {1: 'xkcd:windows blue',\n 2: 'xkcd:perrywinkle',\n 3: 'xkcd:pale olive',\n 4: 'xkcd:faded green',\n 5: 'xkcd:saffron',\n 6: 'xkcd:faded orange',\n 7: 'xkcd:yellow ochre',\n 8: 'xkcd:brown orange',\n 9: 'xkcd:greyish pink',\n 10: 'xkcd:rose red',\n 11: 'xkcd:purple red',\n 12: 'xkcd:dusty purple'}\n\nimport pandas as pd\ndatecols = pd.to_datetime(Q.columns)\n\nfig,ax = plt.subplots(figsize = (12, 12/1.68))\nax.fill_between(range(1,len(Q.columns) + 1),\n Q.min(),\n Q.max(),\n alpha = 0.5,\n facecolor='xkcd:light grey')\nax.plot(range(1,len(Q.columns) + 1),\n Q.median(),\n linewidth = 0.75,\n color = 'xkcd:grey')\nprevLim = 0\nmonthList = list(set(pd.to_datetime(Q.columns).month))\nlabels = []\nlabelColor = []\nnewlabels = []\nfor month in monthList:\n q = Q.loc[:,datecols.month == month]\n ax.plot(range(prevLim + 1,prevLim + len(q.columns) + 1),\n q.median().values,\n linewidth = 0,\n marker = '.',\n markersize = 6,\n color = colors[month])\n prevLim += q.columns[-1].day\n # create labels\n newlabels.append(datecols[datecols.month == month]\n .day[datecols[datecols.month == month]\n .dayofweek == 0])\n labels.extend(newlabels[-1])\n \n\n#for monthLabels in newlabels:\n# for idx, day in enumerate(newlabels[-1]):\n# plt.gca().get_xticklabels()[idx].set_color(colors[month])\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.spines['bottom'].set_color('xkcd:grey')\nax.spines['left'].set_color('xkcd:grey')\nyticks = [Q.min().min(), \n Q.quantile(0.25).mean(),\n Q.mean().mean(), \n Q.median().mean(), \n Q.quantile(0.75).mean(), \n Q.max().max()]\nplt.yticks(ticks = yticks,\n color = 'xkcd:dark grey')\nplt.xticks(\n ticks = range(1,len(Q.columns) + 1,7),\n labels = labels)\nax.set_xlabel('Day of Month')\nax.set_ylabel('Gross Q (MGD)')\nax.set_title(fmname)\nstartIdx = 0\nfor month, monthLabels in zip(monthList, newlabels):\n for idx in range(startIdx, startIdx + len(monthLabels)):\n plt.gca().get_xticklabels()[idx].set_color(colors[month])\n startIdx += len(monthLabels)\nplt.tight_layout()\nplt.savefig('H:\\\\Weather Reports\\\\grossQ_2018_' + fmname + '.png')", "_____no_output_____" ], [ "font = {'family' : 'DejaVu Sans',\n 'weight' : 'normal',\n 'size' : 9}\n\nmatplotlib.rc('font', **font)\n\nrain = [0.84, 4.20, 3.26, 15.82]\ncolor = ['#afc9e5','xkcd:grey','xkcd:windows blue','#afc9e5']\nmonth = 1\n\nfig, ax = plt.subplots(figsize = (2,2))\nax.bar(x = range(1,len(rain)+1),\n height = rain,\n color = color)\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.spines['bottom'].set_color('xkcd:grey')\nax.spines['left'].set_visible(False)\nplt.xticks(ticks = range(1,len(rain)+1),\n labels = [])\nplt.yticks(rain)\nax.yaxis.grid(True,\n color='white',\n linewidth = 1)\nfor idx, rainTotal in enumerate(rain):\n 
plt.gca().get_yticklabels()[idx].set_color(color[idx])\nplt.tight_layout()", "_____no_output_____" ], [ "font = {'family' : 'DejaVu Sans',\n 'weight' : 'normal',\n 'size' : 22}\n\ncolor = {\n 1 : ['#afc9e5','xkcd:grey','xkcd:windows blue','#afc9e5'],\n 2 : ['#d2d1f5','xkcd:grey','xkcd:perrywinkle','#d2d1f5'],\n 3 : ['#e3eacc','xkcd:grey','xkcd:pale olive','#e3eacc'],\n 4 : ['#cae0c7','xkcd:grey','xkcd:faded green','#cae0c7'],\n 5 : ['#fee09c','xkcd:grey','xkcd:saffron','#fee09c'],\n 6 : ['#f9d4b7','xkcd:grey','xkcd:faded orange','#f9d4b7'],\n 7 : ['#ead79b','xkcd:grey','xkcd:yellow ochre','#ead79b'],\n 8 : ['#e3c399','xkcd:grey','xkcd:brown orange','#e3c399'],\n 9 : ['#e9d1d4','xkcd:grey','xkcd:greyish pink','#e9d1d4'],\n 10 : ['#e599b1','xkcd:grey','xkcd:rose red','#e599b1'],\n 11 : ['#d699b5','xkcd:grey','xkcd:purple red','#d699b5'],\n 12 : ['#d3bfcf','xkcd:grey','xkcd:dusty purple','#d3bfcf']\n }\n# YEAR 2018\n# could just leave min, max, and normal and INSERT list.insert(num,pos) the actual rain total?\n# should I also add the 24-hr max?...no?\nrain = {\n 1 : [0.84, 4.20, 3.26, 15.82],\n 2 : [0.84, 4.67, 6.11, 15.82],\n 3 : [0.88, 4.81, 4.86, 13.28],\n 4 : [0.35, 3.36, 6.53, 11.86],\n 5 : [0.30, 3.67, 4.45, 9.94],\n 6 : [0.16, 3.95, 3.86, 11.21],\n 7 : [0.56, 5.27, 8.04, 17.71],\n 8 : [0.02, 3.90, 7.59, 10.02],\n 9 : [0.04, 4.47, 1.48, 14.26],\n 10 : [0.00, 3.41, 4.75, 11.04],\n 11 : [0.18, 4.1, 7.27, 15.72],\n 12 : [0.60, 3.9, 11.83, 12.94],\n}\n\nfig, ax = plt.subplots(\n nrows = 1, \n ncols = 12, \n figsize = (12,2),\n sharey = True)\nfor idx, month in enumerate(rain):\n ax[idx].bar(x = range(1,len(rain[month])+1),\n height = rain[month],\n color = color[month])\n ax[idx].spines['top'].set_visible(False)\n ax[idx].spines['right'].set_visible(False)\n ax[idx].spines['bottom'].set_color('xkcd:grey')\n ax[idx].spines['left'].set_visible(False)\n for rainVal in rain[month]:\n ax[idx].plot(\n [1,len(rain[month])+1],\n [rainVal, rainVal],\n color = 'white',\n linewidth = 1)\n plt.sca(ax[idx]) \n plt.xticks(ticks = range(1,len(rain[month])+1),\n labels = ['m','N','T','M'])\nax[0].set_ylabel('Rain (in)')\nplt.yticks(\n ticks = [0,5,10,15],\n color = 'xkcd:grey')\nplt.tight_layout()\nplt.savefig('H:\\\\Weather Reports\\\\rainbar.png')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
c52dd272aafd9272021f1c0ef12c0877b12565a7
3,944
ipynb
Jupyter Notebook
lectures/L11/Exercise_1.ipynb
HeyItsRiddhi/cs207_riddhi_shah
18d7d6f1fcad213ce35a93ee33c03620f8b06b65
[ "MIT" ]
null
null
null
lectures/L11/Exercise_1.ipynb
HeyItsRiddhi/cs207_riddhi_shah
18d7d6f1fcad213ce35a93ee33c03620f8b06b65
[ "MIT" ]
null
null
null
lectures/L11/Exercise_1.ipynb
HeyItsRiddhi/cs207_riddhi_shah
18d7d6f1fcad213ce35a93ee33c03620f8b06b65
[ "MIT" ]
null
null
null
29.878788
231
0.51217
[ [ [ "# Exercise 1\nRead and parse the chemical reactions `.xml` input file `rxns.xml`.\n\n\n1. Collect the species into a species list. My output is `['H', 'O', 'OH', 'H2', 'O2']`.\n \n Some notes and hints:\n * **Hint:** For this `.xml` format you should have a loop over the `phase` element.\n * **Hint:** You can use the `find()` method to get the species array.\n\n2. Calculate and print out the Arrhenius reaction rate coefficients using $R = 8.314$ and $T = 1500$.\n\n Some notes and hints:\n * **Hint:** For this `.xml` format you should have loops over the `reactionData` element, the `reaction` element, the `rateCoeff` element, and the `Arrhenius` element using the `findall()` method discussed in lecture.\n * **Hint:** You can use the `find()` method to get the reaction rate coefficients.\n * My solution is:\n \n `k for reaction01 = 6.8678391864294477e+05\n k for reaction02 = 2.3105559199959813e+06`", "_____no_output_____" ] ], [ [ "import xml.etree.ElementTree as ET\nreactions = ET.parse('rxns.xml')\nctml = reactions.getroot()\nfor phase in ctml.findall('phase'):\n species = phase.find('speciesArray').text\n\nspecies = species.split()\nprint(species)\n", "['H', 'O', 'OH', 'H2', 'O2']\n" ], [ "import math\nimport numpy as np\n\ndef arrhenius_rate(params, T, R=8.314):\n #Check that A,b,E is passed as a list\n if(len(params) != 3 or type(params) != list):\n print(\"ERROR: First argument needs to be a list of 3\")\n else:\n [A,b,E] = params\n #Check that A,b,E is passed are numbers\n if ((type(A) != int and type(A) != float) or \n (type(b) != int and type(b) != float) or\n (type(E) != int and type(E) != float)):\n print(\"Error: All numbers in list must be of type integer or float\")\n else:\n #Calculate k(T)\n kt = A*T**(b)*np.exp(-E/(R*T))\n return kt", "_____no_output_____" ], [ "#get all reaction elements\nfor i, reaction in enumerate(ctml.find('reactionData').findall('reaction')):\n Arrhenius = reaction.find('rateCoeff').find('Arrhenius')\n A = float(Arrhenius.find('A').text)\n b = float(Arrhenius.find('b').text)\n E = float(Arrhenius.find('E').text)\n \n #calculate rate\n k = arrhenius_rate([A,b,E],1500)\n \n #print rate\n print(\"k for reaction{} = {}\".format(i,k))", "k for reaction0 = 686783.9186429448\nk for reaction1 = 2310555.9199959813\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ] ]
c52dfd7f9c7d216e2d218f720606d6d17ee29460
195,785
ipynb
Jupyter Notebook
01_Simple_Linear_Model.ipynb
bianan/TensorFlow-Tutorials
d4912807267515926d74f066dabaa33fdcfa9519
[ "MIT" ]
null
null
null
01_Simple_Linear_Model.ipynb
bianan/TensorFlow-Tutorials
d4912807267515926d74f066dabaa33fdcfa9519
[ "MIT" ]
null
null
null
01_Simple_Linear_Model.ipynb
bianan/TensorFlow-Tutorials
d4912807267515926d74f066dabaa33fdcfa9519
[ "MIT" ]
1
2018-11-03T02:41:58.000Z
2018-11-03T02:41:58.000Z
140.953924
26,620
0.88045
[ [ [ "# TensorFlow Tutorial #01\n# Simple Linear Model\n\nby [Magnus Erik Hvass Pedersen](http://www.hvass-labs.org/)\n/ [GitHub](https://github.com/Hvass-Labs/TensorFlow-Tutorials) / [Videos on YouTube](https://www.youtube.com/playlist?list=PL9Hr9sNUjfsmEu1ZniY0XpHSzl5uihcXZ)", "_____no_output_____" ], [ "## Introduction\n\nThis tutorial demonstrates the basic workflow of using TensorFlow with a simple linear model. After loading the so-called MNIST data-set with images of hand-written digits, we define and optimize a simple mathematical model in TensorFlow. The results are then plotted and discussed.\n\nYou should be familiar with basic linear algebra, Python and the Jupyter Notebook editor. It also helps if you have a basic understanding of Machine Learning and classification.", "_____no_output_____" ], [ "## Imports", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix", "_____no_output_____" ] ], [ [ "This was developed using Python 3.6.1 (Anaconda) and TensorFlow version:", "_____no_output_____" ] ], [ [ "tf.__version__\nnp.__version__\n", "_____no_output_____" ] ], [ [ "## Load Data", "_____no_output_____" ], [ "The MNIST data-set is about 12 MB and will be downloaded automatically if it is not located in the given path.", "_____no_output_____" ] ], [ [ "from tensorflow.examples.tutorials.mnist import input_data\ndata = input_data.read_data_sets(\"data/MNIST/\", one_hot=True)", "WARNING:tensorflow:From <ipython-input-5-d748032e7ab8>:2: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use alternatives such as official/mnist/dataset.py from tensorflow/models.\nWARNING:tensorflow:From /Users/anbian/anaconda3/envs/tf-pytorch/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease write your own downloading logic.\nWARNING:tensorflow:From /Users/anbian/anaconda3/envs/tf-pytorch/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/base.py:252: _internal_retry.<locals>.wrap.<locals>.wrapped_fn (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use urllib or similar directly.\nSuccessfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.\nWARNING:tensorflow:From /Users/anbian/anaconda3/envs/tf-pytorch/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use tf.data to implement this functionality.\nExtracting data/MNIST/train-images-idx3-ubyte.gz\nSuccessfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.\nWARNING:tensorflow:From /Users/anbian/anaconda3/envs/tf-pytorch/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use tf.data to implement this functionality.\nExtracting 
data/MNIST/train-labels-idx1-ubyte.gz\nWARNING:tensorflow:From /Users/anbian/anaconda3/envs/tf-pytorch/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use tf.one_hot on tensors.\nSuccessfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.\nExtracting data/MNIST/t10k-images-idx3-ubyte.gz\nSuccessfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.\nExtracting data/MNIST/t10k-labels-idx1-ubyte.gz\nWARNING:tensorflow:From /Users/anbian/anaconda3/envs/tf-pytorch/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use alternatives such as official/mnist/dataset.py from tensorflow/models.\n" ] ], [ [ "The MNIST data-set has now been loaded and consists of 70.000 images and associated labels (i.e. classifications of the images). The data-set is split into 3 mutually exclusive sub-sets. We will only use the training and test-sets in this tutorial.", "_____no_output_____" ] ], [ [ "print(\"Size of:\")\nprint(\"- Training-set:\\t\\t{}\".format(len(data.train.labels)))\nprint(\"- Test-set:\\t\\t{}\".format(len(data.test.labels)))\nprint(\"- Validation-set:\\t{}\".format(len(data.validation.labels)))", "Size of:\n- Training-set:\t\t55000\n- Test-set:\t\t10000\n- Validation-set:\t5000\n" ] ], [ [ "### One-Hot Encoding", "_____no_output_____" ], [ "The data-set has been loaded as so-called One-Hot encoding. This means the labels have been converted from a single number to a vector whose length equals the number of possible classes. All elements of the vector are zero except for the $i$'th element which is one and means the class is $i$. For example, the One-Hot encoded labels for the first 5 images in the test-set are:", "_____no_output_____" ] ], [ [ "data.test.labels[0:5, :]", "_____no_output_____" ] ], [ [ "We also need the classes as single numbers for various comparisons and performance measures, so we convert the One-Hot encoded vectors to a single number by taking the index of the highest element. Note that the word 'class' is a keyword used in Python so we need to use the name 'cls' instead.", "_____no_output_____" ] ], [ [ "data.test.cls = np.array([label.argmax() for label in data.test.labels])", "_____no_output_____" ] ], [ [ "We can now see the class for the first five images in the test-set. Compare these to the One-Hot encoded vectors above. For example, the class for the first image is 7, which corresponds to a One-Hot encoded vector where all elements are zero except for the element with index 7.", "_____no_output_____" ] ], [ [ "data.test.cls[0:5]", "_____no_output_____" ] ], [ [ "### Data dimensions", "_____no_output_____" ], [ "The data dimensions are used in several places in the source-code below. In computer programming it is generally best to use variables and constants rather than having to hard-code specific numbers every time that number is used. This means the numbers only have to be changed in one single place. 
Ideally these would be inferred from the data that has been read, but here we just write the numbers.", "_____no_output_____" ] ], [ [ "# We know that MNIST images are 28 pixels in each dimension.\nimg_size = 28\n\n# Images are stored in one-dimensional arrays of this length.\nimg_size_flat = img_size * img_size\n\n# Tuple with height and width of images used to reshape arrays.\nimg_shape = (img_size, img_size)\n\n# Number of classes, one class for each of 10 digits.\nnum_classes = 10\n\nimg_shape", "_____no_output_____" ] ], [ [ "### Helper-function for plotting images", "_____no_output_____" ], [ "Function used to plot 9 images in a 3x3 grid, and writing the true and predicted classes below each image.", "_____no_output_____" ] ], [ [ "def plot_images(images, cls_true, cls_pred=None):\n assert len(images) == len(cls_true) == 9\n \n # Create figure with 3x3 sub-plots.\n fig, axes = plt.subplots(3, 3)\n fig.subplots_adjust(hspace=0.3, wspace=0.3)\n\n for i, ax in enumerate(axes.flat):\n # Plot image.\n ax.imshow(images[i].reshape(img_shape), cmap='binary')\n\n # Show true and predicted classes.\n if cls_pred is None:\n xlabel = \"True: {0}\".format(cls_true[i])\n else:\n xlabel = \"True: {0}, Pred: {1}\".format(cls_true[i], cls_pred[i])\n\n ax.set_xlabel(xlabel)\n \n # Remove ticks from the plot.\n ax.set_xticks([])\n ax.set_yticks([])\n \n # Ensure the plot is shown correctly with multiple plots\n # in a single Notebook cell.\n plt.show()", "_____no_output_____" ] ], [ [ "### Plot a few images to see if data is correct", "_____no_output_____" ] ], [ [ "# Get the first images from the test-set.\nimages = data.test.images[0:9]\n\n# Get the true classes for those images.\ncls_true = data.test.cls[0:9]\n\n# Plot the images and labels using our helper-function above.\nplot_images(images=images, cls_true=cls_true)", "_____no_output_____" ] ], [ [ "## TensorFlow Graph\n\nThe entire purpose of TensorFlow is to have a so-called computational graph that can be executed much more efficiently than if the same calculations were to be performed directly in Python. TensorFlow can be more efficient than NumPy because TensorFlow knows the entire computation graph that must be executed, while NumPy only knows the computation of a single mathematical operation at a time.\n\nTensorFlow can also automatically calculate the gradients that are needed to optimize the variables of the graph so as to make the model perform better. This is because the graph is a combination of simple mathematical expressions so the gradient of the entire graph can be calculated using the chain-rule for derivatives.\n\nTensorFlow can also take advantage of multi-core CPUs as well as GPUs - and Google has even built special chips just for TensorFlow which are called TPUs (Tensor Processing Units) and are even faster than GPUs.\n\nA TensorFlow graph consists of the following parts which will be detailed below:\n\n* Placeholder variables used to change the input to the graph.\n* Model variables that are going to be optimized so as to make the model perform better.\n* The model which is essentially just a mathematical function that calculates some output given the input in the placeholder variables and the model variables.\n* A cost measure that can be used to guide the optimization of the variables.\n* An optimization method which updates the variables of the model.\n\nIn addition, the TensorFlow graph may also contain various debugging statements e.g. 
for logging data to be displayed using TensorBoard, which is not covered in this tutorial.", "_____no_output_____" ], [ "### Placeholder variables", "_____no_output_____" ], [ "Placeholder variables serve as the input to the graph that we may change each time we execute the graph. We call this feeding the placeholder variables and it is demonstrated further below.\n\nFirst we define the placeholder variable for the input images. This allows us to change the images that are input to the TensorFlow graph. This is a so-called tensor, which just means that it is a multi-dimensional vector or matrix. The data-type is set to `float32` and the shape is set to `[None, img_size_flat]`, where `None` means that the tensor may hold an arbitrary number of images with each image being a vector of length `img_size_flat`.", "_____no_output_____" ] ], [ [ "x = tf.placeholder(tf.float32, [None, img_size_flat])\nimg_size_flat", "_____no_output_____" ] ], [ [ "Next we have the placeholder variable for the true labels associated with the images that were input in the placeholder variable `x`. The shape of this placeholder variable is `[None, num_classes]` which means it may hold an arbitrary number of labels and each label is a vector of length `num_classes` which is 10 in this case.", "_____no_output_____" ] ], [ [ "y_true = tf.placeholder(tf.float32, [None, num_classes])", "_____no_output_____" ] ], [ [ "Finally we have the placeholder variable for the true class of each image in the placeholder variable `x`. These are integers and the dimensionality of this placeholder variable is set to `[None]` which means the placeholder variable is a one-dimensional vector of arbitrary length.", "_____no_output_____" ] ], [ [ "y_true_cls = tf.placeholder(tf.int64, [None])", "_____no_output_____" ] ], [ [ "### Variables to be optimized", "_____no_output_____" ], [ "Apart from the placeholder variables that were defined above and which serve as feeding input data into the model, there are also some model variables that must be changed by TensorFlow so as to make the model perform better on the training data.\n\nThe first variable that must be optimized is called `weights` and is defined here as a TensorFlow variable that must be initialized with zeros and whose shape is `[img_size_flat, num_classes]`, so it is a 2-dimensional tensor (or matrix) with `img_size_flat` rows and `num_classes` columns.", "_____no_output_____" ] ], [ [ "weights = tf.Variable(tf.zeros([img_size_flat, num_classes]))", "_____no_output_____" ] ], [ [ "The second variable that must be optimized is called `biases` and is defined as a 1-dimensional tensor (or vector) of length `num_classes`.", "_____no_output_____" ] ], [ [ "biases = tf.Variable(tf.zeros([num_classes]))", "_____no_output_____" ] ], [ [ "### Model", "_____no_output_____" ], [ "This simple mathematical model multiplies the images in the placeholder variable `x` with the `weights` and then adds the `biases`.\n\nThe result is a matrix of shape `[num_images, num_classes]` because `x` has shape `[num_images, img_size_flat]` and `weights` has shape `[img_size_flat, num_classes]`, so the multiplication of those two matrices is a matrix with shape `[num_images, num_classes]` and then the `biases` vector is added to each row of that matrix.\n\nNote that the name `logits` is typical TensorFlow terminology, but other people may call the variable something else.", "_____no_output_____" ] ], [ [ "logits = tf.matmul(x, weights) + biases", "_____no_output_____" ] ], [ [ "Now `logits` is a matrix with 
`num_images` rows and `num_classes` columns, where the element of the $i$'th row and $j$'th column is an estimate of how likely the $i$'th input image is to be of the $j$'th class.\n\nHowever, these estimates are a bit rough and difficult to interpret because the numbers may be very small or large, so we want to normalize them so that each row of the `logits` matrix sums to one, and each element is limited between zero and one. This is calculated using the so-called softmax function and the result is stored in `y_pred`.", "_____no_output_____" ] ], [ [ "y_pred = tf.nn.softmax(logits)", "_____no_output_____" ] ], [ [ "The predicted class can be calculated from the `y_pred` matrix by taking the index of the largest element in each row.", "_____no_output_____" ] ], [ [ "y_pred_cls = tf.argmax(y_pred, axis=1)", "_____no_output_____" ] ], [ [ "### Cost-function to be optimized", "_____no_output_____" ], [ "To make the model better at classifying the input images, we must somehow change the variables for `weights` and `biases`. To do this we first need to know how well the model currently performs by comparing the predicted output of the model `y_pred` to the desired output `y_true`.\n\nThe cross-entropy is a performance measure used in classification. The cross-entropy is a continuous function that is always positive and if the predicted output of the model exactly matches the desired output then the cross-entropy equals zero. The goal of optimization is therefore to minimize the cross-entropy so it gets as close to zero as possible by changing the `weights` and `biases` of the model.\n\nTensorFlow has a built-in function for calculating the cross-entropy. Note that it uses the values of the `logits` because it also calculates the softmax internally.", "_____no_output_____" ] ], [ [ "cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits,\n labels=y_true)", "WARNING:tensorflow:From <ipython-input-27-a65440ade102>:2: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\n\nFuture major versions of TensorFlow will allow gradients to flow\ninto the labels input on backprop by default.\n\nSee @{tf.nn.softmax_cross_entropy_with_logits_v2}.\n\n" ] ], [ [ "We have now calculated the cross-entropy for each of the image classifications so we have a measure of how well the model performs on each image individually. But in order to use the cross-entropy to guide the optimization of the model's variables we need a single scalar value, so we simply take the average of the cross-entropy for all the image classifications.", "_____no_output_____" ] ], [ [ "cost = tf.reduce_mean(cross_entropy)", "_____no_output_____" ] ], [ [ "### Optimization method", "_____no_output_____" ], [ "Now that we have a cost measure that must be minimized, we can then create an optimizer. In this case it is the basic form of Gradient Descent where the step-size is set to 0.5.\n\nNote that optimization is not performed at this point. 
In fact, nothing is calculated at all, we just add the optimizer-object to the TensorFlow graph for later execution.", "_____no_output_____" ] ], [ [ "optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.5).minimize(cost)", "_____no_output_____" ] ], [ [ "### Performance measures", "_____no_output_____" ], [ "We need a few more performance measures to display the progress to the user.\n\nThis is a vector of booleans whether the predicted class equals the true class of each image.", "_____no_output_____" ] ], [ [ "correct_prediction = tf.equal(y_pred_cls, y_true_cls)", "_____no_output_____" ] ], [ [ "This calculates the classification accuracy by first type-casting the vector of booleans to floats, so that False becomes 0 and True becomes 1, and then calculating the average of these numbers.", "_____no_output_____" ] ], [ [ "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))", "_____no_output_____" ] ], [ [ "## TensorFlow Run", "_____no_output_____" ], [ "### Create TensorFlow session\n\nOnce the TensorFlow graph has been created, we have to create a TensorFlow session which is used to execute the graph.", "_____no_output_____" ] ], [ [ "session = tf.Session()", "_____no_output_____" ] ], [ [ "### Initialize variables\n\nThe variables for `weights` and `biases` must be initialized before we start optimizing them.", "_____no_output_____" ] ], [ [ "session.run(tf.global_variables_initializer())", "_____no_output_____" ] ], [ [ "### Helper-function to perform optimization iterations", "_____no_output_____" ], [ "There are 50.000 images in the training-set. It takes a long time to calculate the gradient of the model using all these images. We therefore use Stochastic Gradient Descent which only uses a small batch of images in each iteration of the optimizer.", "_____no_output_____" ] ], [ [ "batch_size = 100", "_____no_output_____" ] ], [ [ "Function for performing a number of optimization iterations so as to gradually improve the `weights` and `biases` of the model. In each iteration, a new batch of data is selected from the training-set and then TensorFlow executes the optimizer using those training samples.", "_____no_output_____" ] ], [ [ "def optimize(num_iterations):\n for i in range(num_iterations):\n # Get a batch of training examples.\n # x_batch now holds a batch of images and\n # y_true_batch are the true labels for those images.\n x_batch, y_true_batch = data.train.next_batch(batch_size)\n \n # Put the batch into a dict with the proper names\n # for placeholder variables in the TensorFlow graph.\n # Note that the placeholder for y_true_cls is not set\n # because it is not used during training.\n feed_dict_train = {x: x_batch,\n y_true: y_true_batch}\n\n # Run the optimizer using this batch of training data.\n # TensorFlow assigns the variables in feed_dict_train\n # to the placeholder variables and then runs the optimizer.\n session.run(optimizer, feed_dict=feed_dict_train)", "_____no_output_____" ] ], [ [ "### Helper-functions to show performance", "_____no_output_____" ], [ "Dict with the test-set data to be used as input to the TensorFlow graph. 
Note that we must use the correct names for the placeholder variables in the TensorFlow graph.", "_____no_output_____" ] ], [ [ "feed_dict_test = {x: data.test.images,\n y_true: data.test.labels,\n y_true_cls: data.test.cls}", "_____no_output_____" ] ], [ [ "Function for printing the classification accuracy on the test-set.", "_____no_output_____" ] ], [ [ "def print_accuracy():\n # Use TensorFlow to compute the accuracy.\n acc = session.run(accuracy, feed_dict=feed_dict_test)\n \n # Print the accuracy.\n print(\"Accuracy on test-set: {0:.1%}\".format(acc))", "_____no_output_____" ] ], [ [ "Function for printing and plotting the confusion matrix using scikit-learn.", "_____no_output_____" ] ], [ [ "def print_confusion_matrix():\n # Get the true classifications for the test-set.\n cls_true = data.test.cls\n \n # Get the predicted classifications for the test-set.\n cls_pred = session.run(y_pred_cls, feed_dict=feed_dict_test)\n\n # Get the confusion matrix using sklearn.\n cm = confusion_matrix(y_true=cls_true,\n y_pred=cls_pred)\n\n # Print the confusion matrix as text.\n print(cm)\n\n # Plot the confusion matrix as an image.\n plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n\n # Make various adjustments to the plot.\n plt.tight_layout()\n plt.colorbar()\n tick_marks = np.arange(num_classes)\n plt.xticks(tick_marks, range(num_classes))\n plt.yticks(tick_marks, range(num_classes))\n plt.xlabel('Predicted')\n plt.ylabel('True')\n \n # Ensure the plot is shown correctly with multiple plots\n # in a single Notebook cell.\n plt.show()", "_____no_output_____" ] ], [ [ "Function for plotting examples of images from the test-set that have been mis-classified.", "_____no_output_____" ] ], [ [ "def plot_example_errors():\n # Use TensorFlow to get a list of boolean values\n # whether each test-image has been correctly classified,\n # and a list for the predicted class of each image.\n correct, cls_pred = session.run([correct_prediction, y_pred_cls],\n feed_dict=feed_dict_test)\n\n # Negate the boolean array.\n incorrect = (correct == False)\n \n # Get the images from the test-set that have been\n # incorrectly classified.\n images = data.test.images[incorrect]\n \n # Get the predicted classes for those images.\n cls_pred = cls_pred[incorrect]\n\n # Get the true classes for those images.\n cls_true = data.test.cls[incorrect]\n \n # Plot the first 9 images.\n plot_images(images=images[0:9],\n cls_true=cls_true[0:9],\n cls_pred=cls_pred[0:9])", "_____no_output_____" ] ], [ [ "### Helper-function to plot the model weights", "_____no_output_____" ], [ "Function for plotting the `weights` of the model. 
10 images are plotted, one for each digit that the model is trained to recognize.", "_____no_output_____" ] ], [ [ "def plot_weights():\n # Get the values for the weights from the TensorFlow variable.\n w = session.run(weights)\n \n # Get the lowest and highest values for the weights.\n # This is used to correct the colour intensity across\n # the images so they can be compared with each other.\n w_min = np.min(w)\n w_max = np.max(w)\n\n # Create figure with 3x4 sub-plots,\n # where the last 2 sub-plots are unused.\n fig, axes = plt.subplots(3, 4)\n fig.subplots_adjust(hspace=0.3, wspace=0.3)\n\n for i, ax in enumerate(axes.flat):\n # Only use the weights for the first 10 sub-plots.\n if i<10:\n # Get the weights for the i'th digit and reshape it.\n # Note that w.shape == (img_size_flat, 10)\n image = w[:, i].reshape(img_shape)\n\n # Set the label for the sub-plot.\n ax.set_xlabel(\"Weights: {0}\".format(i))\n\n # Plot the image.\n ax.imshow(image, vmin=w_min, vmax=w_max, cmap='seismic')\n\n # Remove ticks from each sub-plot.\n ax.set_xticks([])\n ax.set_yticks([])\n \n # Ensure the plot is shown correctly with multiple plots\n # in a single Notebook cell.\n plt.show()", "_____no_output_____" ] ], [ [ "## Performance before any optimization\n\nThe accuracy on the test-set is 9.8%. This is because the model has only been initialized and not optimized at all, so it always predicts that the image shows a zero digit, as demonstrated in the plot below, and it turns out that 9.8% of the images in the test-set happens to be zero digits.", "_____no_output_____" ] ], [ [ "print_accuracy()", "Accuracy on test-set: 9.8%\n" ], [ "plot_example_errors()", "_____no_output_____" ] ], [ [ "## Performance after 1 optimization iteration\n\nAlready after a single optimization iteration, the model has increased its accuracy on the test-set to 40.7% up from 9.8%. This means that it mis-classifies the images about 6 out of 10 times, as demonstrated on a few examples below.", "_____no_output_____" ] ], [ [ "optimize(num_iterations=1)", "_____no_output_____" ], [ "print_accuracy()", "Accuracy on test-set: 28.0%\n" ], [ "plot_example_errors()", "_____no_output_____" ] ], [ [ "The weights can also be plotted as shown below. Positive weights are red and negative weights are blue. These weights can be intuitively understood as image-filters.\n\nFor example, the weights used to determine if an image shows a zero-digit have a positive reaction (red) to an image of a circle, and have a negative reaction (blue) to images with content in the centre of the circle.\n\nSimilarly, the weights used to determine if an image shows a one-digit react positively (red) to a vertical line in the centre of the image, and react negatively (blue) to images with content surrounding that line.\n\nNote that the weights mostly look like the digits they're supposed to recognize. This is because only one optimization iteration has been performed so the weights are only trained on 100 images. 
After training on several thousand images, the weights become more difficult to interpret because they have to recognize many variations of how digits can be written.", "_____no_output_____" ] ], [ [ "plot_weights()", "_____no_output_____" ] ], [ [ "## Performance after 10 optimization iterations", "_____no_output_____" ] ], [ [ "# We have already performed 1 iteration.\noptimize(num_iterations=9)", "_____no_output_____" ], [ "print_accuracy()", "Accuracy on test-set: 76.9%\n" ], [ "plot_example_errors()", "_____no_output_____" ], [ "plot_weights()", "_____no_output_____" ] ], [ [ "## Performance after 1000 optimization iterations\n\nAfter 1000 optimization iterations, the model only mis-classifies about one in ten images. As demonstrated below, some of the mis-classifications are justified because the images are very hard to determine with certainty even for humans, while others are quite obvious and should have been classified correctly by a good model. But this simple model cannot reach much better performance and more complex models are therefore needed.", "_____no_output_____" ] ], [ [ "# We have already performed 10 iterations.\noptimize(num_iterations=990)", "_____no_output_____" ], [ "print_accuracy()", "Accuracy on test-set: 91.5%\n" ], [ "plot_example_errors()", "_____no_output_____" ] ], [ [ "The model has now been trained for 1000 optimization iterations, with each iteration using 100 images from the training-set. Because of the great variety of the images, the weights have now become difficult to interpret and we may doubt whether the model truly understands how digits are composed from lines, or whether the model has just memorized many different variations of pixels.", "_____no_output_____" ] ], [ [ "plot_weights()", "_____no_output_____" ] ], [ [ "We can also print and plot the so-called confusion matrix which lets us see more details about the mis-classifications. For example, it shows that images actually depicting a 5 have sometimes been mis-classified as all other possible digits, but mostly either 3, 6 or 8.", "_____no_output_____" ] ], [ [ "print_confusion_matrix()", "[[ 952 0 0 1 0 10 13 2 2 0]\n [ 0 1109 2 2 1 2 4 2 13 0]\n [ 6 11 889 16 16 7 17 18 46 6]\n [ 3 1 14 901 1 36 5 15 19 15]\n [ 1 1 2 1 918 0 16 2 9 32]\n [ 8 3 1 27 7 784 20 8 26 8]\n [ 7 3 2 2 9 12 920 2 1 0]\n [ 2 10 19 8 6 1 0 952 2 28]\n [ 5 6 4 17 9 37 13 13 859 11]\n [ 10 6 1 9 42 8 1 31 7 894]]\n" ] ], [ [ "We are now done using TensorFlow, so we close the session to release its resources.", "_____no_output_____" ] ], [ [ "# This has been commented out in case you want to modify and experiment\n# with the Notebook without having to restart it.\n# session.close()", "_____no_output_____" ] ], [ [ "## Exercises\n\nThese are a few suggestions for exercises that may help improve your skills with TensorFlow. It is important to get hands-on experience with TensorFlow in order to learn how to use it properly.\n\nYou may want to backup this Notebook before making any changes.\n\n* Change the learning-rate for the optimizer.\n* Change the optimizer to e.g. `AdagradOptimizer` or `AdamOptimizer`.\n* Change the batch-size to e.g. 1 or 1000.\n* How do these changes affect the performance?\n* Do you think these changes will have the same effect (if any) on other classification problems and mathematical models?\n* Do you get the exact same results if you run the Notebook multiple times without changing any parameters? 
Why or why not?\n* Change the function `plot_example_errors()` so it also prints the `logits` and `y_pred` values for the mis-classified examples.\n* Use `sparse_softmax_cross_entropy_with_logits` instead of `softmax_cross_entropy_with_logits`. This may require several changes to multiple places in the source-code. Discuss the advantages and disadvantages of using the two methods.\n* Remake the program yourself without looking too much at this source-code.\n* Explain to a friend how the program works.", "_____no_output_____" ], [ "## License (MIT)\n\nCopyright (c) 2016 by [Magnus Erik Hvass Pedersen](http://www.hvass-labs.org/)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
c52e1c2f98bbfe3484e6ce5499b67ac1d5d3b997
388,263
ipynb
Jupyter Notebook
wgan/W-GAN.ipynb
kartikgill/TF2-Keras-GAN-Notebooks
16220377844d904ed639e7e5bd67602ccad407cc
[ "MIT" ]
11
2021-01-05T05:27:42.000Z
2022-01-24T09:49:02.000Z
wgan/W-GAN.ipynb
kartikgill/TF2-Keras-GAN-Notebooks
16220377844d904ed639e7e5bd67602ccad407cc
[ "MIT" ]
null
null
null
wgan/W-GAN.ipynb
kartikgill/TF2-Keras-GAN-Notebooks
16220377844d904ed639e7e5bd67602ccad407cc
[ "MIT" ]
4
2021-02-20T10:24:52.000Z
2022-01-24T09:48:58.000Z
374.049133
19,676
0.934202
[ [ [ "<a href=\"https://colab.research.google.com/github/kartikgill/The-GAN-Book/blob/main/Skill-07/W-GAN.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Importing useful Libraries", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm_notebook\n%matplotlib inline", "_____no_output_____" ], [ "import tensorflow\nprint (tensorflow.__version__)", "2.4.1\n" ] ], [ [ "# Download and show data", "_____no_output_____" ] ], [ [ "from tensorflow.keras.datasets import fashion_mnist, mnist\n\n(trainX, trainY), (testX, testY) = mnist.load_data()\n\nprint('Training data shapes: X=%s, y=%s' % (trainX.shape, trainY.shape))\nprint('Testing data shapes: X=%s, y=%s' % (testX.shape, testY.shape))\n\n\nfor k in range(9):\n plt.figure(figsize=(9, 6))\n for j in range(9):\n i = np.random.randint(0, 10000)\n plt.subplot(990 + 1 + j)\n plt.imshow(trainX[i], cmap='gray_r')\n #plt.title(trainY[i])\n plt.axis('off')\n plt.show()", "Training data shapes: X=(60000, 28, 28), y=(60000,)\nTesting data shapes: X=(10000, 28, 28), y=(10000,)\n" ], [ "#Ten classes\nset(trainY)", "_____no_output_____" ] ], [ [ "# Data Normalization", "_____no_output_____" ] ], [ [ "trainX = [(image-127.5)/127.5 for image in trainX]\ntestX = [(image-127.5)/127.5 for image in testX]\n\ntrainX = np.reshape(trainX, (60000, 28, 28, 1))\ntestX = np.reshape(testX, (10000, 28, 28, 1))\n\nprint (trainX.shape, testX.shape, trainY.shape, testY.shape)", "(60000, 28, 28, 1) (10000, 28, 28, 1) (60000,) (10000,)\n" ] ], [ [ "# Define Generator Model", "_____no_output_____" ] ], [ [ "random_input = tensorflow.keras.layers.Input(shape = 100)\n\nx = tensorflow.keras.layers.Dense(7*7*128)(random_input)\nx = tensorflow.keras.layers.Reshape((7, 7, 128))(x)\n\nx = tensorflow.keras.layers.Conv2DTranspose(filters=128, kernel_size=(3,3), strides=2, padding='same')(x)\nx = tensorflow.keras.layers.BatchNormalization(momentum=0.8)(x)\nx = tensorflow.keras.layers.Activation('relu')(x)\n\nx = tensorflow.keras.layers.Conv2DTranspose(filters=128, kernel_size=(3,3), strides=2, padding='same')(x)\nx = tensorflow.keras.layers.BatchNormalization(momentum=0.8)(x)\nx = tensorflow.keras.layers.Activation('relu')(x)\n\nx = tensorflow.keras.layers.Conv2DTranspose(filters=128, kernel_size=(3,3), padding='same')(x)\nx = tensorflow.keras.layers.Activation('relu')(x)\n\nx = tensorflow.keras.layers.Conv2DTranspose(filters=1, kernel_size=(4,4), padding='same')(x)\ngenerated_image = tensorflow.keras.layers.Activation('tanh')(x)\n\ngenerator_network = tensorflow.keras.models.Model(inputs=random_input, outputs=generated_image)\ngenerator_network.summary()", "Model: \"model\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_1 (InputLayer) [(None, 100)] 0 \n_________________________________________________________________\ndense (Dense) (None, 6272) 633472 \n_________________________________________________________________\nreshape (Reshape) (None, 7, 7, 128) 0 \n_________________________________________________________________\nconv2d_transpose (Conv2DTran (None, 14, 14, 128) 147584 \n_________________________________________________________________\nbatch_normalization (BatchNo (None, 14, 14, 128) 512 \n_________________________________________________________________\nactivation 
(Activation) (None, 14, 14, 128) 0 \n_________________________________________________________________\nconv2d_transpose_1 (Conv2DTr (None, 28, 28, 128) 147584 \n_________________________________________________________________\nbatch_normalization_1 (Batch (None, 28, 28, 128) 512 \n_________________________________________________________________\nactivation_1 (Activation) (None, 28, 28, 128) 0 \n_________________________________________________________________\nconv2d_transpose_2 (Conv2DTr (None, 28, 28, 128) 147584 \n_________________________________________________________________\nactivation_2 (Activation) (None, 28, 28, 128) 0 \n_________________________________________________________________\nconv2d_transpose_3 (Conv2DTr (None, 28, 28, 1) 2049 \n_________________________________________________________________\nactivation_3 (Activation) (None, 28, 28, 1) 0 \n=================================================================\nTotal params: 1,079,297\nTrainable params: 1,078,785\nNon-trainable params: 512\n_________________________________________________________________\n" ] ], [ [ "# Define Critic", "_____no_output_____" ] ], [ [ "image_input = tensorflow.keras.layers.Input(shape=(28, 28, 1))\n\nx = tensorflow.keras.layers.Conv2D(filters=64, kernel_size=(3,3), strides=2, padding='same')(image_input)\nx = tensorflow.keras.layers.LeakyReLU(alpha=0.2)(x)\nx = tensorflow.keras.layers.Dropout(0.25)(x)\n\nx = tensorflow.keras.layers.Conv2D(filters=128, kernel_size=(3,3), strides=2, padding='same')(x)\nx = tensorflow.keras.layers.BatchNormalization(momentum=0.8)(x)\nx = tensorflow.keras.layers.LeakyReLU(alpha=0.2)(x)\nx = tensorflow.keras.layers.Dropout(0.25)(x)\n\nx = tensorflow.keras.layers.Conv2D(filters=128, kernel_size=(3,3), strides=2, padding='same')(x)\nx = tensorflow.keras.layers.BatchNormalization(momentum=0.8)(x)\nx = tensorflow.keras.layers.LeakyReLU(alpha=0.2)(x)\nx = tensorflow.keras.layers.Dropout(0.25)(x)\n\nx = tensorflow.keras.layers.Conv2D(filters=256, kernel_size=(3,3), padding='same')(x)\nx = tensorflow.keras.layers.BatchNormalization(momentum=0.8)(x)\nx = tensorflow.keras.layers.LeakyReLU(alpha=0.2)(x)\nx = tensorflow.keras.layers.Dropout(0.25)(x)\n\nx = tensorflow.keras.layers.Flatten()(x)\n\n# No activation in final layer\nc_out = tensorflow.keras.layers.Dense(1)(x)\n\ncritic_network = tensorflow.keras.models.Model(inputs=image_input, outputs=c_out)\n\nprint (critic_network.summary())", "Model: \"model_1\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_2 (InputLayer) [(None, 28, 28, 1)] 0 \n_________________________________________________________________\nconv2d (Conv2D) (None, 14, 14, 64) 640 \n_________________________________________________________________\nleaky_re_lu (LeakyReLU) (None, 14, 14, 64) 0 \n_________________________________________________________________\ndropout (Dropout) (None, 14, 14, 64) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 7, 7, 128) 73856 \n_________________________________________________________________\nbatch_normalization_2 (Batch (None, 7, 7, 128) 512 \n_________________________________________________________________\nleaky_re_lu_1 (LeakyReLU) (None, 7, 7, 128) 0 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 7, 7, 128) 0 \n_________________________________________________________________\nconv2d_2 
(Conv2D) (None, 4, 4, 128) 147584 \n_________________________________________________________________\nbatch_normalization_3 (Batch (None, 4, 4, 128) 512 \n_________________________________________________________________\nleaky_re_lu_2 (LeakyReLU) (None, 4, 4, 128) 0 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 4, 4, 128) 0 \n_________________________________________________________________\nconv2d_3 (Conv2D) (None, 4, 4, 256) 295168 \n_________________________________________________________________\nbatch_normalization_4 (Batch (None, 4, 4, 256) 1024 \n_________________________________________________________________\nleaky_re_lu_3 (LeakyReLU) (None, 4, 4, 256) 0 \n_________________________________________________________________\ndropout_3 (Dropout) (None, 4, 4, 256) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 4096) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 1) 4097 \n=================================================================\nTotal params: 523,393\nTrainable params: 522,369\nNon-trainable params: 1,024\n_________________________________________________________________\nNone\n" ] ], [ [ "# Define Wasserstein Loss", "_____no_output_____" ] ], [ [ "# custom loss function\ndef wasserstein_loss(y_true, y_pred):\n return tensorflow.keras.backend.mean(y_true * y_pred)", "_____no_output_____" ] ], [ [ "# Compiling Critic Network", "_____no_output_____" ] ], [ [ "RMSprop_optimizer = tensorflow.keras.optimizers.RMSprop(lr=0.00005)\ncritic_network.compile(loss=wasserstein_loss, optimizer=RMSprop_optimizer, metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "# Define Wasserstein GAN (W-GAN)", "_____no_output_____" ] ], [ [ "critic_network.trainable=False\n\ng_output = generator_network(random_input)\nc_output = critic_network(g_output)\n\nwgan_model = tensorflow.keras.models.Model(inputs = random_input, outputs = c_output)\nwgan_model.summary()", "Model: \"model_2\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_1 (InputLayer) [(None, 100)] 0 \n_________________________________________________________________\nmodel (Functional) (None, 28, 28, 1) 1079297 \n_________________________________________________________________\nmodel_1 (Functional) (None, 1) 523393 \n=================================================================\nTotal params: 1,602,690\nTrainable params: 1,078,785\nNon-trainable params: 523,905\n_________________________________________________________________\n" ] ], [ [ "# Compiling WGAN", "_____no_output_____" ] ], [ [ "wgan_model.compile(loss=wasserstein_loss, optimizer=RMSprop_optimizer)", "_____no_output_____" ] ], [ [ "# Define Data Generators", "_____no_output_____" ] ], [ [ "indices = [i for i in range(0, len(trainX))]\n\ndef get_random_noise(batch_size, noise_size):\n random_values = np.random.randn(batch_size*noise_size)\n random_noise_batches = np.reshape(random_values, (batch_size, noise_size))\n return random_noise_batches\n\ndef get_fake_samples(generator_network, batch_size, noise_size):\n random_noise_batches = get_random_noise(batch_size, noise_size) \n fake_samples = generator_network.predict_on_batch(random_noise_batches)\n return fake_samples\n\ndef get_real_samples(batch_size):\n random_indices = np.random.choice(indices, size=batch_size)\n real_images = 
trainX[np.array(random_indices),:]\n return real_images\n\ndef show_generator_results(generator_network):\n for k in range(7):\n plt.figure(figsize=(9, 6))\n random_noise_batches = get_random_noise(7, noise_size) \n fake_samples = generator_network.predict_on_batch(random_noise_batches)\n for j in range(7):\n i = j\n plt.subplot(770 + 1 + j)\n plt.imshow(((fake_samples[i,:,:,-1])/2.0)+0.5, cmap='gray_r')\n plt.axis('off')\n plt.show()\n return", "_____no_output_____" ] ], [ [ "# Training W-GAN", "_____no_output_____" ] ], [ [ "epochs = 500\nbatch_size = 64\nsteps = 500\nnoise_size = 100\n\nfor i in range(0, epochs):\n if (i%1 == 0):\n op = show_generator_results(generator_network)\n #print (op)\n for j in range(steps):\n # With Number of Critics=5\n for _ in range(5):\n fake_samples = get_fake_samples(generator_network, batch_size//2, noise_size)\n real_samples = get_real_samples(batch_size=batch_size//2)\n\n fake_y = np.ones((batch_size//2, 1))\n real_y = -1 * np.ones((batch_size//2, 1))\n\n # Updating Critic weights\n\n critic_network.trainable=True\n loss_c_real = critic_network.train_on_batch(real_samples, real_y)\n loss_c_fake = critic_network.train_on_batch(fake_samples, fake_y)\n \n loss_c = np.add(loss_c_real, loss_c_fake)/2.0\n\n # Clip critic weights\n for l in critic_network.layers:\n weights = l.get_weights()\n weights = [np.clip(w, -0.01, 0.01) for w in weights]\n l.set_weights(weights)\n \n if False:\n print (\"C_real_loss: %.3f, C_fake_loss: %.3f, C_loss: %.3f\"%(loss_c_real[0], loss_c_fake[0], loss_c[0]))\n \n noise_batches = get_random_noise(batch_size, noise_size)\n \n wgan_input = noise_batches\n \n # Make the Discriminator belive that these are real samples and calculate loss to train the generator\n wgan_output = -1 * np.ones((batch_size, 1))\n \n # Updating Generator weights\n critic_network.trainable=False\n loss_g = wgan_model.train_on_batch(wgan_input, wgan_output)\n \n if j%50 == 0:\n print (\"Epoch:%.0f, Step:%.0f, C-Loss:%.6f, G-Loss:%.6f\"%(i,j,loss_c[0] ,loss_g))", "_____no_output_____" ] ], [ [ "# Results", "_____no_output_____" ] ], [ [ "for i in range(2):\n show_generator_results(generator_network)\n print(\"-\"*100)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
c52e28ba92c504459abeb0f4c3338e19ad3cac92
11,742
ipynb
Jupyter Notebook
model-inference/decisionTree/experiments/hummingbird/notebooks/XGB-example.ipynb
asu-cactus/netsdb
fe6ae0e1dcd07ea0b9a47ff4a657d5ed029c7af5
[ "Apache-2.0" ]
13
2022-01-17T16:14:26.000Z
2022-03-30T02:06:04.000Z
model-inference/decisionTree/experiments/hummingbird/notebooks/XGB-example.ipynb
asu-cactus/netsdb
fe6ae0e1dcd07ea0b9a47ff4a657d5ed029c7af5
[ "Apache-2.0" ]
1
2022-01-28T23:17:14.000Z
2022-01-28T23:17:14.000Z
model-inference/decisionTree/experiments/hummingbird/notebooks/XGB-example.ipynb
asu-cactus/netsdb
fe6ae0e1dcd07ea0b9a47ff4a657d5ed029c7af5
[ "Apache-2.0" ]
3
2022-01-18T02:13:53.000Z
2022-03-06T19:28:19.000Z
58.41791
348
0.651763
[ [ [ "!pip install hummingbird_ml[extra]", "Requirement already satisfied: hummingbird_ml[extra] in /home/mahidhar/.local/lib/python3.8/site-packages (0.4.4)\nRequirement already satisfied: scikit-learn>=0.21.3 in /home/mahidhar/.local/lib/python3.8/site-packages (from hummingbird_ml[extra]) (1.0.2)\nRequirement already satisfied: onnxconverter-common>=1.6.0 in /home/mahidhar/.local/lib/python3.8/site-packages (from hummingbird_ml[extra]) (1.9.0)\nRequirement already satisfied: scipy in /home/mahidhar/.local/lib/python3.8/site-packages (from hummingbird_ml[extra]) (1.8.0)\nRequirement already satisfied: psutil in /home/mahidhar/.local/lib/python3.8/site-packages (from hummingbird_ml[extra]) (5.9.0)\nRequirement already satisfied: dill in /home/mahidhar/.local/lib/python3.8/site-packages (from hummingbird_ml[extra]) (0.3.4)\nRequirement already satisfied: torch>=1.4 in /home/mahidhar/.local/lib/python3.8/site-packages (from hummingbird_ml[extra]) (1.11.0+cu113)\nRequirement already satisfied: numpy>=1.15 in /home/mahidhar/.local/lib/python3.8/site-packages (from hummingbird_ml[extra]) (1.22.3)\nRequirement already satisfied: xgboost>=0.90; extra == \"extra\" in /home/mahidhar/.local/lib/python3.8/site-packages (from hummingbird_ml[extra]) (1.6.0)\nRequirement already satisfied: lightgbm>=2.2; extra == \"extra\" in /home/mahidhar/.local/lib/python3.8/site-packages (from hummingbird_ml[extra]) (3.3.2)\nRequirement already satisfied: prophet==1.0.1; extra == \"extra\" in /home/mahidhar/.local/lib/python3.8/site-packages (from hummingbird_ml[extra]) (1.0.1)\nRequirement already satisfied: threadpoolctl>=2.0.0 in /home/mahidhar/.local/lib/python3.8/site-packages (from scikit-learn>=0.21.3->hummingbird_ml[extra]) (3.1.0)\nRequirement already satisfied: joblib>=0.11 in /home/mahidhar/.local/lib/python3.8/site-packages (from scikit-learn>=0.21.3->hummingbird_ml[extra]) (1.1.0)\nRequirement already satisfied: protobuf in /home/mahidhar/.local/lib/python3.8/site-packages (from onnxconverter-common>=1.6.0->hummingbird_ml[extra]) (3.19.4)\nRequirement already satisfied: onnx in /home/mahidhar/.local/lib/python3.8/site-packages (from onnxconverter-common>=1.6.0->hummingbird_ml[extra]) (1.11.0)\nRequirement already satisfied: typing-extensions in /home/mahidhar/.local/lib/python3.8/site-packages (from torch>=1.4->hummingbird_ml[extra]) (4.1.1)\nRequirement already satisfied: wheel in /usr/lib/python3/dist-packages (from lightgbm>=2.2; extra == \"extra\"->hummingbird_ml[extra]) (0.34.2)\nRequirement already satisfied: Cython>=0.22 in /home/mahidhar/.local/lib/python3.8/site-packages (from prophet==1.0.1; extra == \"extra\"->hummingbird_ml[extra]) (0.29.28)\nRequirement already satisfied: LunarCalendar>=0.0.9 in /home/mahidhar/.local/lib/python3.8/site-packages (from prophet==1.0.1; extra == \"extra\"->hummingbird_ml[extra]) (0.0.9)\nRequirement already satisfied: cmdstanpy==0.9.68 in /home/mahidhar/.local/lib/python3.8/site-packages (from prophet==1.0.1; extra == \"extra\"->hummingbird_ml[extra]) (0.9.68)\nRequirement already satisfied: convertdate>=2.1.2 in /home/mahidhar/.local/lib/python3.8/site-packages (from prophet==1.0.1; extra == \"extra\"->hummingbird_ml[extra]) (2.4.0)\nRequirement already satisfied: holidays>=0.10.2 in /home/mahidhar/.local/lib/python3.8/site-packages (from prophet==1.0.1; extra == \"extra\"->hummingbird_ml[extra]) (0.13)\nRequirement already satisfied: matplotlib>=2.0.0 in /home/mahidhar/.local/lib/python3.8/site-packages (from prophet==1.0.1; extra == 
\"extra\"->hummingbird_ml[extra]) (3.5.1)\nRequirement already satisfied: pandas>=1.0.4 in /home/mahidhar/.local/lib/python3.8/site-packages (from prophet==1.0.1; extra == \"extra\"->hummingbird_ml[extra]) (1.4.1)\nRequirement already satisfied: pystan~=2.19.1.1 in /home/mahidhar/.local/lib/python3.8/site-packages (from prophet==1.0.1; extra == \"extra\"->hummingbird_ml[extra]) (2.19.1.1)\nRequirement already satisfied: python-dateutil>=2.8.0 in /home/mahidhar/.local/lib/python3.8/site-packages (from prophet==1.0.1; extra == \"extra\"->hummingbird_ml[extra]) (2.8.2)\nRequirement already satisfied: setuptools-git>=1.2 in /home/mahidhar/.local/lib/python3.8/site-packages (from prophet==1.0.1; extra == \"extra\"->hummingbird_ml[extra]) (1.2)\nRequirement already satisfied: tqdm>=4.36.1 in /home/mahidhar/.local/lib/python3.8/site-packages (from prophet==1.0.1; extra == \"extra\"->hummingbird_ml[extra]) (4.63.2)\nRequirement already satisfied: ephem>=3.7.5.3 in /home/mahidhar/.local/lib/python3.8/site-packages (from LunarCalendar>=0.0.9->prophet==1.0.1; extra == \"extra\"->hummingbird_ml[extra]) (4.1.3)\nRequirement already satisfied: pytz in /home/mahidhar/.local/lib/python3.8/site-packages (from LunarCalendar>=0.0.9->prophet==1.0.1; extra == \"extra\"->hummingbird_ml[extra]) (2022.1)\nRequirement already satisfied: ujson in /home/mahidhar/.local/lib/python3.8/site-packages (from cmdstanpy==0.9.68->prophet==1.0.1; extra == \"extra\"->hummingbird_ml[extra]) (5.2.0)\nRequirement already satisfied: pymeeus<=1,>=0.3.13 in /home/mahidhar/.local/lib/python3.8/site-packages (from convertdate>=2.1.2->prophet==1.0.1; extra == \"extra\"->hummingbird_ml[extra]) (0.5.11)\nRequirement already satisfied: korean-lunar-calendar in /home/mahidhar/.local/lib/python3.8/site-packages (from holidays>=0.10.2->prophet==1.0.1; extra == \"extra\"->hummingbird_ml[extra]) (0.2.1)\nRequirement already satisfied: hijri-converter in /home/mahidhar/.local/lib/python3.8/site-packages (from holidays>=0.10.2->prophet==1.0.1; extra == \"extra\"->hummingbird_ml[extra]) (2.2.3)\nRequirement already satisfied: packaging>=20.0 in /home/mahidhar/.local/lib/python3.8/site-packages (from matplotlib>=2.0.0->prophet==1.0.1; extra == \"extra\"->hummingbird_ml[extra]) (21.3)\nRequirement already satisfied: pillow>=6.2.0 in /usr/lib/python3/dist-packages (from matplotlib>=2.0.0->prophet==1.0.1; extra == \"extra\"->hummingbird_ml[extra]) (7.0.0)\nRequirement already satisfied: kiwisolver>=1.0.1 in /home/mahidhar/.local/lib/python3.8/site-packages (from matplotlib>=2.0.0->prophet==1.0.1; extra == \"extra\"->hummingbird_ml[extra]) (1.4.0)\nRequirement already satisfied: fonttools>=4.22.0 in /home/mahidhar/.local/lib/python3.8/site-packages (from matplotlib>=2.0.0->prophet==1.0.1; extra == \"extra\"->hummingbird_ml[extra]) (4.30.0)\nRequirement already satisfied: cycler>=0.10 in /home/mahidhar/.local/lib/python3.8/site-packages (from matplotlib>=2.0.0->prophet==1.0.1; extra == \"extra\"->hummingbird_ml[extra]) (0.11.0)\nRequirement already satisfied: pyparsing>=2.2.1 in /home/mahidhar/.local/lib/python3.8/site-packages (from matplotlib>=2.0.0->prophet==1.0.1; extra == \"extra\"->hummingbird_ml[extra]) (3.0.7)\nRequirement already satisfied: six>=1.5 in /usr/lib/python3/dist-packages (from python-dateutil>=2.8.0->prophet==1.0.1; extra == \"extra\"->hummingbird_ml[extra]) (1.14.0)\n" ], [ "import numpy as np\nimport xgboost as xgb\nfrom hummingbird.ml import convert\n\n# Create some random data for binary classification.\nnum_classes = 2\nX = 
np.random.rand(100000, 28)\ny = np.random.randint(num_classes, size=100000)", "/home/mahidhar/.local/lib/python3.8/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n from .autonotebook import tqdm as notebook_tqdm\n/home/mahidhar/.local/lib/python3.8/site-packages/sklearn/experimental/enable_hist_gradient_boosting.py:16: UserWarning: Since version 1.0, it is not needed to import enable_hist_gradient_boosting anymore. HistGradientBoostingClassifier and HistGradientBoostingRegressor are now stable and can be normally imported from sklearn.ensemble.\n warnings.warn(\nImporting plotly failed. Interactive plots will not work.\n" ], [ "# Create and train a model (XGBoost in this case).\nmodel = xgb.XGBClassifier(n_estimators = 10, max_depth=8)\nmodel.fit(X, y)", "_____no_output_____" ], [ "# Use Hummingbird to convert the model to PyTorch\n# Note that XGBRegressor requires us to pass it some sample data.\nhb_model = convert(model, 'torch', X[0:1])", "_____no_output_____" ], [ "%%timeit -r 3\n\n# Run Hummingbird on CPU - By default CPU execution is used in Hummingbird.\nhb_model.predict(X)", "62.2 ms ± 2.88 ms per loop (mean ± std. dev. of 3 runs, 10 loops each)\n" ], [ "%%timeit -r 3\n\n# Run Hummingbird on GPU (Note that you must have a GPU-enabled machine).\nhb_model.to('cuda')\nhb_model.predict(X)", "21.8 ms ± 53.8 µs per loop (mean ± std. dev. of 3 runs, 1 loop each)\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
c52e39533a9fd094e2d7567f44468e99b3778a53
23,433
ipynb
Jupyter Notebook
Transform/dummy_variable.ipynb
haataa/worldbank_ETL
416d1907778927a98e525ff514b76264faebb2ed
[ "CNRI-Python" ]
null
null
null
Transform/dummy_variable.ipynb
haataa/worldbank_ETL
416d1907778927a98e525ff514b76264faebb2ed
[ "CNRI-Python" ]
null
null
null
Transform/dummy_variable.ipynb
haataa/worldbank_ETL
416d1907778927a98e525ff514b76264faebb2ed
[ "CNRI-Python" ]
null
null
null
41.40106
426
0.433193
[ [ [ "# Dummy Variables Exercise\n\nIn this exercise, you'll create dummy variables from the projects data set. The idea is to transform categorical data like this:\n\n| Project ID | Project Category |\n|------------|------------------|\n| 0 | Energy |\n| 1 | Transportation |\n| 2 | Health |\n| 3 | Employment |\n\ninto new features that look like this:\n\n| Project ID | Energy | Transportation | Health | Employment |\n|------------|--------|----------------|--------|------------|\n| 0 | 1 | 0 | 0 | 0 |\n| 1 | 0 | 1 | 0 | 0 |\n| 2 | 0 | 0 | 1 | 0 |\n| 3 | 0 | 0 | 0 | 1 |\n\n\n(Note if you were going to use this data with a model influenced by multicollinearity, you would want to eliminate one of the columns to avoid redundant information.) \n\nThe reasoning behind these transformations is that machine learning algorithms read in numbers not text. Text needs to be converted into numbers. You could assign a number to each category like 1, 2, 3, and 4. But a categorical variable has no inherent order, so you want to reflect this in your features.\n\nPandas makes it very easy to create dummy variables with the [get_dummies](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.get_dummies.html) method. In this exercise, you'll create dummy variables from the World Bank projects data; however, there's a caveat. The World Bank data is not particularly clean, so you'll need to explore and wrangle the data first.\n\nYou'll focus on the text values in the sector variables.\n\nRun the code cells below to read in the World Bank projects data set and then to filter out the data for text variables. ", "_____no_output_____" ], [ "# Check Category Data", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\n\n# read in the projects data set and do basic wrangling \nprojects = pd.read_csv('./data/projects_data.csv', dtype=str)\nprojects.drop('Unnamed: 56', axis=1, inplace=True)\nprojects['totalamt'] = pd.to_numeric(projects['totalamt'].str.replace(',', ''))\nprojects['countryname'] = projects['countryname'].str.split(';', expand=True)[0]\nprojects['boardapprovaldate'] = pd.to_datetime(projects['boardapprovaldate'])\n\n# keep the project name, lending, sector and theme data\n\n\nsector = projects.copy()\nsector = sector[['project_name', 'lendinginstr', 'sector1', 'sector2', 'sector3', 'sector4', 'sector5', 'sector',\n 'mjsector1', 'mjsector2', 'mjsector3', 'mjsector4', 'mjsector5',\n 'mjsector', 'theme1', 'theme2', 'theme3', 'theme4', 'theme5', 'theme ',\n 'goal', 'financier', 'mjtheme1name', 'mjtheme2name', 'mjtheme3name',\n 'mjtheme4name', 'mjtheme5name']]\n# output percentage of values that are missing\n100 * sector.isnull().sum() / sector.shape[0]", "_____no_output_____" ], [ "uniquesectors1 = list(sector['sector1'].sort_values().unique())\nuniquesectors1[0:10]", "_____no_output_____" ], [ "print('Number of unique values in sector1:', len(uniquesectors1))", "Number of unique values in sector1: 3060\n" ] ], [ [ "# Clean Category Data\n3060 different categories is quite a lot! Remember that with dummy variables, if you have n categorical values, you need n - 1 new variables! That means 3059 extra columns! \n\nThere are a few issues with this 'sector1' variable. First, there are values labeled '!$!0'. These should be substituted with NaN.\n\nFurthermore, each sector1 value ends with a ten or eleven character string like '!$!49!$!EP'. 
Some sectors show up twice in the list like:\n 'Other Industry; Trade and Services!$!70!$!YZ',\n 'Other Industry; Trade and Services!$!63!$!YZ',\n\nBut it seems like those are actually the same sector. You'll need to remove everything past the exclamation point. \n\nMany values in the sector1 variable start with the term '(Historic)'. Try removing that phrase as well.\n\n### replace() method\n\nWith pandas, you can use the replace() method to search for text and replace parts of a string with another string. If you know the exact string you're looking for, the replace() method is straightforward. For example, say you wanted to remove the string '(Trial)' from this data:\n\n| data |\n|--------------------------|\n| '(Trial) Banking' |\n| 'Banking' |\n| 'Farming' |\n| '(Trial) Transportation' |\n\nYou could use `df['data'].replace('(Trial)', '')` to replace (Trial) with an empty string.\n\n### regular expressions\nWhat about this data?\n\n| data |\n|------------------------------------------------|\n| 'Other Industry; Trade and Services?$ab' |\n| 'Other Industry; Trade and Services?ceg' |\n\nThis type of data is trickier. In this case, there's a pattern where you want to remove a string that starts with a question mark and then has an unknown number of characters after it. When you need to match patterns of characters, you can use [regular expressions](https://en.wikipedia.org/wiki/Regular_expression).\n\nThe replace method can take a regular expression. So\ndf['data'].replace('?.+', '', regex=True) where '?.+' means find a set of characters that starts with a question mark and is then followed by one or more characters. You can see a [regular expression cheat sheet](https://medium.com/factory-mind/regex-tutorial-a-simple-cheatsheet-by-examples-649dc1c3f285) here.\n\nFix these issues in the code cell below.", "_____no_output_____" ] ], [ [ "# TODO: In the sector1 variable, replace the string '!$10' with nan\n# Put the results back into the sector1 variable\n# HINT: you can use the pandas replace() method and numpy.nan\nsector['sector1'] = sector['sector1'].replace('!$10', np.nan)\n\n\n# TODO: In the sector1 variable, remove the last 10 or 11 characters from the sector1 variable.\n# HINT: There is more than one way to do this. To do it with one line of code,\n# you can use the replace method with a regex expression '!.+'\n# That regex expression looks for a string with an exclamation\n# point followed by one or more characters\nsector['sector1'] = sector['sector1'].replace('!.+', '', regex=True)\n\n\n# TODO: Remove the string '(Historic)' from the sector1 variable\n# HINT: You can use the replace method\n#sector['sector1'] = sector['sector1'].replace('(Historic)', '', regex=True)\nsector['sector1'] = sector['sector1'].replace('^(\\\(Historic\\\))', '', regex=True)\n\nprint('Number of unique sectors after cleaning:', len(list(sector['sector1'].unique())))\nprint('Percentage of null values after cleaning:', 100 * sector['sector1'].isnull().sum() / sector['sector1'].shape[0])", "Number of unique sectors after cleaning: 156\nPercentage of null values after cleaning: 0.0\n" ] ], [ [ "Now there are 156 unique categorical values. That's better than 3060. If you were going to use this data with a supervised machine learning model, you could try converting these 156 values to dummy variables. 
You'd still have to train and test a model to see if those are good features.\n\nYou could try to consolidate similar categories together, which is what the challenge exercise in part 4 is about.\n\nThere are also still many entries with NaN values. How could you fill these in?\n\nYou might try to determine an appropriate category from the 'project_name' or 'lendinginstr' variables. If you make dummy variables including NaN values, then you could consider a feature with all zeros to represent NaN. Or you could delete these records from the data set. Pandas will ignore NaN values by default. That means, for a given row, all dummy variables will have a value of 0 if the sector1 value was NaN.\n\nDon't forget about the bigger context! This data is being prepared for a machine learning algorithm. Whatever techniques you use to engineer new features, you'll need to use those when running your model on new data. So if your new data does not contain a sector1 value, you'll have to run whatever feature engineering processes you did on your training set.", "_____no_output_____" ], [ "# Get Dummies\nIn this next exercise, use the pandas pd.get_dummies() method to create dummy variables. Then use the concat() method to concatenate the dummy variables to a dataframe that contains the project totalamt variable and the project year from the boardapprovaldate.", "_____no_output_____" ] ], [ [ "# TODO: Create dummy variables from the sector1 data. Put the results into a dataframe called dummies\n# Hint: Use the get_dummies method\ndummies = pd.get_dummies(sector['sector1'])\n\n# TODO: Create a new dataframe called df by \n# filtering the projects data for the totalamt and\n# the year from boardapprovaldate\n \nprojects['year'] = projects['boardapprovaldate'].dt.year\n\ndf = projects[['totalamt','year']]\n \n\n\n# TODO: Concatenate the results of dummies and projects\n# into a single data frame\ndf_final = pd.concat([df, dummies], axis=1)\n\ndf_final.head()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
c52e3ef88bfa2e1abd9873df7fee0deb6cab9221
95,891
ipynb
Jupyter Notebook
Python Basics Course/PY0101EN-5-1-Numpy1D.ipynb
Pugnatore/IBM_Professional_Certification
f70fd8e78850eb54b7cef60d4e865df0748751e9
[ "MIT" ]
null
null
null
Python Basics Course/PY0101EN-5-1-Numpy1D.ipynb
Pugnatore/IBM_Professional_Certification
f70fd8e78850eb54b7cef60d4e865df0748751e9
[ "MIT" ]
null
null
null
Python Basics Course/PY0101EN-5-1-Numpy1D.ipynb
Pugnatore/IBM_Professional_Certification
f70fd8e78850eb54b7cef60d4e865df0748751e9
[ "MIT" ]
null
null
null
41.691739
16,452
0.734042
[ [ [ "<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n <a href=\"https://cocl.us/topNotebooksPython101Coursera\">\n <img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/TopAd.png\" width=\"750\" align=\"center\">\n </a>\n</div>", "_____no_output_____" ], [ "<a href=\"https://cognitiveclass.ai/\">\n <img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/CCLog.png\" width=\"200\" align=\"center\">\n</a>", "_____no_output_____" ], [ "<h1>1D <code>Numpy</code> in Python</h1>", "_____no_output_____" ], [ "<p><strong>Welcome!</strong> This notebook will teach you about using <code>Numpy</code> in the Python Programming Language. By the end of this lab, you'll know what <code>Numpy</code> is and the <code>Numpy</code> operations.</p>", "_____no_output_____" ], [ "<h2>Table of Contents</h2>\n<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n <ul>\n <li><a href=\"pre\">Preparation</a></li>\n <li>\n <a href=\"numpy\">What is Numpy?</a>\n <ul>\n <li><a href=\"type\">Type</a></li>\n <li><a href=\"val\">Assign Value</a></li>\n <li><a href=\"slice\">Slicing</a></li>\n <li><a href=\"list\">Assign Value with List</a></li>\n <li><a href=\"other\">Other Attributes</a></li>\n </ul>\n </li>\n <li>\n <a href=\"op\">Numpy Array Operations</a>\n <ul>\n <li><a href=\"add\">Array Addition</a></li>\n <li><a href=\"multi\">Array Multiplication</a></li>\n <li><a href=\"prod\">Product of Two Numpy Arrays</a></li>\n <li><a href=\"dot\">Dot Product</a></li>\n <li><a href=\"cons\">Adding Constant to a Numpy Array</a></li>\n </ul>\n </li>\n <li><a href=\"math\">Mathematical Functions</a></li>\n <li><a href=\"lin\">Linspace</a></li>\n </ul>\n <p>\n Estimated time needed: <strong>30 min</strong>\n </p>\n</div>\n\n<hr>", "_____no_output_____" ], [ "<h2 id=\"pre\">Preparation</h2>", "_____no_output_____" ] ], [ [ "# Import the libraries\n\nimport time \nimport sys\nimport numpy as np \n\nimport matplotlib.pyplot as plt\n%matplotlib inline ", "_____no_output_____" ], [ "# Plotting functions\n\ndef Plotvec1(u, z, v):\n \n ax = plt.axes()\n ax.arrow(0, 0, *u, head_width=0.05, color='r', head_length=0.1)\n plt.text(*(u + 0.1), 'u')\n \n ax.arrow(0, 0, *v, head_width=0.05, color='b', head_length=0.1)\n plt.text(*(v + 0.1), 'v')\n ax.arrow(0, 0, *z, head_width=0.05, head_length=0.1)\n plt.text(*(z + 0.1), 'z')\n plt.ylim(-2, 2)\n plt.xlim(-2, 2)\n\ndef Plotvec2(a,b):\n ax = plt.axes()\n ax.arrow(0, 0, *a, head_width=0.05, color ='r', head_length=0.1)\n plt.text(*(a + 0.1), 'a')\n ax.arrow(0, 0, *b, head_width=0.05, color ='b', head_length=0.1)\n plt.text(*(b + 0.1), 'b')\n plt.ylim(-2, 2)\n plt.xlim(-2, 2)", "_____no_output_____" ] ], [ [ "Create a Python List as follows:", "_____no_output_____" ] ], [ [ "# Create a python list\n\na = [\"0\", 1, \"two\", \"3\", 4]", "_____no_output_____" ] ], [ [ "We can access the data via an index:", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%205/Images/NumOneList.png\" width=\"660\" />", "_____no_output_____" ], [ "We can access each element using a square bracket as follows: ", "_____no_output_____" ] ], [ [ "# Print each element\n\nprint(\"a[0]:\", a[0])\nprint(\"a[1]:\", a[1])\nprint(\"a[2]:\", a[2])\nprint(\"a[3]:\", a[3])\nprint(\"a[4]:\", a[4])", "a[0]: 0\na[1]: 1\na[2]: two\na[3]: 3\na[4]: 4\n" ] ], [ [ "<hr>", "_____no_output_____" ], [ "<h2 
id=\"numpy\">What is Numpy?</h2>", "_____no_output_____" ], [ "A numpy array is similar to a list. It's usually fixed in size and each element is of the same type. We can cast a list to a numpy array by first importing numpy: ", "_____no_output_____" ] ], [ [ "# import numpy library\n\nimport numpy as np ", "_____no_output_____" ] ], [ [ " We then cast the list as follows:", "_____no_output_____" ] ], [ [ "# Create a numpy array\n\na = np.array([0, 1, 2, 3, 4])\na", "_____no_output_____" ] ], [ [ "Each element is of the same type, in this case integers: ", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%205/Images/NumOneNp.png\" width=\"500\" />", "_____no_output_____" ], [ " As with lists, we can access each element via a square bracket:", "_____no_output_____" ] ], [ [ "# Print each element\n\nprint(\"a[0]:\", a[0])\nprint(\"a[1]:\", a[1])\nprint(\"a[2]:\", a[2])\nprint(\"a[3]:\", a[3])\nprint(\"a[4]:\", a[4])", "a[0]: 0\na[1]: 1\na[2]: 2\na[3]: 3\na[4]: 4\n" ] ], [ [ "<h3 id=\"type\">Type</h3>", "_____no_output_____" ], [ "If we check the type of the array we get <b>numpy.ndarray</b>:", "_____no_output_____" ] ], [ [ "# Check the type of the array\n\nother = np.array([[6,4,2],[7,6,3]])\ntype(other)\nother.shape", "_____no_output_____" ] ], [ [ "As numpy arrays contain data of the same type, we can use the attribute \"dtype\" to obtain the Data-type of the array’s elements. In this case a 64-bit integer: \n", "_____no_output_____" ] ], [ [ "# Check the type of the values stored in numpy array\n\na.dtype", "_____no_output_____" ] ], [ [ "We can create a numpy array with real numbers:", "_____no_output_____" ] ], [ [ "# Create a numpy array\n\nb = np.array([3.1, 11.02, 6.2, 213.2, 5.2])", "_____no_output_____" ] ], [ [ "When we check the type of the array we get <b>numpy.ndarray</b>:", "_____no_output_____" ] ], [ [ "# Check the type of array\n\ntype(b)", "_____no_output_____" ] ], [ [ "If we examine the attribute <code>dtype</code> we see float 64, as the elements are not integers: ", "_____no_output_____" ] ], [ [ "# Check the value type\n\nb.dtype", "_____no_output_____" ] ], [ [ "<h3 id=\"val\">Assign value</h3>", "_____no_output_____" ], [ "We can change the value of the array, consider the array <code>c</code>:", "_____no_output_____" ] ], [ [ "# Create numpy array\n\nc = np.array([20, 1, 2, 3, 4])\nc", "_____no_output_____" ] ], [ [ "We can change the first element of the array to 100 as follows:", "_____no_output_____" ] ], [ [ "# Assign the first element to 100\n\nc[0] = 100\nc", "_____no_output_____" ] ], [ [ "We can change the 5th element of the array to 0 as follows:", "_____no_output_____" ] ], [ [ "# Assign the 5th element to 0\n\nc[4] = 0\nc", "_____no_output_____" ] ], [ [ "<h3 id=\"slice\">Slicing</h3>", "_____no_output_____" ], [ "Like lists, we can slice the numpy array, and we can select the elements from 1 to 3 and assign it to a new numpy array <code>d</code> as follows:", "_____no_output_____" ] ], [ [ "# Slicing the numpy array\n\nd = c[1:4]\nd", "_____no_output_____" ] ], [ [ "We can assign the corresponding indexes to new values as follows: ", "_____no_output_____" ] ], [ [ "# Set the fourth element and fifth element to 300 and 400\n\nc[3:5] = 300, 400\nc", "_____no_output_____" ] ], [ [ "<h3 id=\"list\">Assign Value with List</h3>", "_____no_output_____" ], [ "Similarly, we can use a list to select a specific index.\nThe list ' select ' contains several values:\n", 
"_____no_output_____" ] ], [ [ "# Create the index list\n\nselect = [0, 2, 3]", "_____no_output_____" ] ], [ [ "We can use the list as an argument in the brackets. The output is the elements corresponding to the particular index:", "_____no_output_____" ] ], [ [ "# Use List to select elements\n\nd = c[select]\ntype(d)", "_____no_output_____" ] ], [ [ "We can assign the specified elements to a new value. For example, we can assign the values to 100 000 as follows:", "_____no_output_____" ] ], [ [ "# Assign the specified elements to new value\n\nc[select] = 100000\nc", "_____no_output_____" ] ], [ [ "<h3 id=\"other\">Other Attributes</h3>", "_____no_output_____" ], [ "Let's review some basic array attributes using the array <code>a</code>:", "_____no_output_____" ] ], [ [ "# Create a numpy array\n\na = np.array([0, 1, 2, 3, 4])\na", "_____no_output_____" ] ], [ [ "The attribute <code>size</code> is the number of elements in the array:", "_____no_output_____" ] ], [ [ "# Get the size of numpy array\n\na.size", "_____no_output_____" ] ], [ [ "The next two attributes will make more sense when we get to higher dimensions but let's review them. The attribute <code>ndim</code> represents the number of array dimensions or the rank of the array, in this case, one:", "_____no_output_____" ] ], [ [ "# Get the number of dimensions of numpy array\n\na.ndim", "_____no_output_____" ] ], [ [ "The attribute <code>shape</code> is a tuple of integers indicating the size of the array in each dimension:", "_____no_output_____" ] ], [ [ "# Get the shape/size of numpy array\n\na.shape", "_____no_output_____" ], [ "# Create a numpy array\n\na = np.array([1, -1, 1, -1])", "_____no_output_____" ], [ "# Get the mean of numpy array\n\nmean = a.mean()\nmean", "_____no_output_____" ], [ "# Get the standard deviation of numpy array\n\nstandard_deviation=a.std()\nstandard_deviation", "_____no_output_____" ], [ "# Create a numpy array\n\nb = np.array([-1, 2, 3, 4, 5])\nb", "_____no_output_____" ], [ "# Get the biggest value in the numpy array\n\nmax_b = b.max()\nmax_b", "_____no_output_____" ], [ "# Get the smallest value in the numpy array\n\nmin_b = b.min()\nmin_b", "_____no_output_____" ] ], [ [ "<hr>", "_____no_output_____" ], [ "<h2 id=\"op\">Numpy Array Operations</h2>", "_____no_output_____" ], [ "<h3 id=\"add\">Array Addition</h3>", "_____no_output_____" ], [ "Consider the numpy array <code>u</code>:", "_____no_output_____" ] ], [ [ "u = np.array([1, 0])\nu", "_____no_output_____" ] ], [ [ "Consider the numpy array <code>v</code>:", "_____no_output_____" ] ], [ [ "v = np.array([0, 1])\nv", "_____no_output_____" ] ], [ [ "We can add the two arrays and assign it to z:", "_____no_output_____" ] ], [ [ "# Numpy Array Addition\n\nz = u + v\nz", "_____no_output_____" ] ], [ [ " The operation is equivalent to vector addition:", "_____no_output_____" ] ], [ [ "# Plot numpy arrays\n\nPlotvec1(u, z, v)", "_____no_output_____" ] ], [ [ "<h3 id=\"multi\">Array Multiplication</h3>", "_____no_output_____" ], [ "Consider the vector numpy array <code>y</code>:", "_____no_output_____" ] ], [ [ "# Create a numpy array\n\ny = np.array([1, 2])\ny", "_____no_output_____" ] ], [ [ "We can multiply every element in the array by 2:", "_____no_output_____" ] ], [ [ "# Numpy Array Multiplication\n\nz = 2 * y\nz", "_____no_output_____" ] ], [ [ " This is equivalent to multiplying a vector by a scaler: ", "_____no_output_____" ], [ "<h3 id=\"prod\">Product of Two Numpy Arrays</h3>", "_____no_output_____" ], [ "Consider the following array 
<code>u</code>:", "_____no_output_____" ] ], [ [ "# Create a numpy array\n\nu = np.array([1, 2])\nu", "_____no_output_____" ] ], [ [ "Consider the following array <code>v</code>:", "_____no_output_____" ] ], [ [ "# Create a numpy array\n\nv = np.array([3, 2])\nv", "_____no_output_____" ] ], [ [ " The product of the two numpy arrays <code>u</code> and <code>v</code> is given by:", "_____no_output_____" ] ], [ [ "# Calculate the production of two numpy arrays\n\nz = u * v\nz", "_____no_output_____" ] ], [ [ "<h3 id=\"dot\">Dot Product</h3>", "_____no_output_____" ], [ "The dot product of the two numpy arrays <code>u</code> and <code>v</code> is given by:", "_____no_output_____" ] ], [ [ "# Calculate the dot product\n\nnp.dot(u, v)", "_____no_output_____" ] ], [ [ "<h3 id=\"cons\">Adding Constant to a Numpy Array</h3>", "_____no_output_____" ], [ "Consider the following array: ", "_____no_output_____" ] ], [ [ "# Create a constant to numpy array\n\nu = np.array([1, 2, 3, -1]) \nu", "_____no_output_____" ] ], [ [ "Adding the constant 1 to each element in the array:", "_____no_output_____" ] ], [ [ "# Add the constant to array\n\nu + 1", "_____no_output_____" ] ], [ [ " The process is summarised in the following animation:", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%205/Images/NumOneAdd.gif\" width=\"500\" />", "_____no_output_____" ], [ "<hr>", "_____no_output_____" ], [ "<h2 id=\"math\">Mathematical Functions</h2>", "_____no_output_____" ], [ " We can access the value of pie in numpy as follows :", "_____no_output_____" ] ], [ [ "# The value of pie\n\nnp.pi", "_____no_output_____" ] ], [ [ " We can create the following numpy array in Radians:", "_____no_output_____" ] ], [ [ "# Create the numpy array in radians\n\nx = np.array([0, np.pi/2 , np.pi])", "_____no_output_____" ] ], [ [ "We can apply the function <code>sin</code> to the array <code>x</code> and assign the values to the array <code>y</code>; this applies the sine function to each element in the array: ", "_____no_output_____" ] ], [ [ "# Calculate the sin of each elements\n\ny = np.sin(x)\ny", "_____no_output_____" ] ], [ [ "<hr>", "_____no_output_____" ], [ "<h2 id=\"lin\">Linspace</h2>", "_____no_output_____" ], [ " A useful function for plotting mathematical functions is \"linespace\". Linespace returns evenly spaced numbers over a specified interval. We specify the starting point of the sequence and the ending point of the sequence. 
The parameter \"num\" indicates the Number of samples to generate, in this case 5:", "_____no_output_____" ] ], [ [ "# Makeup a numpy array within [-2, 2] and 5 elements\n\nnp.linspace(-2, 2, num=5)", "_____no_output_____" ] ], [ [ "If we change the parameter <code>num</code> to 9, we get 9 evenly spaced numbers over the interval from -2 to 2: ", "_____no_output_____" ] ], [ [ "# Makeup a numpy array within [-2, 2] and 9 elements\n\nnp.linspace(-2, 2, num=9)", "_____no_output_____" ] ], [ [ "We can use the function line space to generate 100 evenly spaced samples from the interval 0 to 2π: ", "_____no_output_____" ] ], [ [ "# Makeup a numpy array within [0, 2π] and 100 elements \n\nx = np.linspace(0, 2*np.pi, num=100)", "_____no_output_____" ] ], [ [ "We can apply the sine function to each element in the array <code>x</code> and assign it to the array <code>y</code>: ", "_____no_output_____" ] ], [ [ "# Calculate the sine of x list\n\ny = np.sin(x)", "_____no_output_____" ], [ "# Plot the result\n\nplt.plot(x, y)", "_____no_output_____" ] ], [ [ "<hr>", "_____no_output_____" ], [ "<h2 id=\"quiz\">Quiz on 1D Numpy Array</h2>", "_____no_output_____" ], [ "Implement the following vector subtraction in numpy: u-v", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\n\nu = np.array([1, 0])\nv = np.array([0, 1])\nu-v", "_____no_output_____" ] ], [ [ "Double-click __here__ for the solution.\n\n<!-- Your answer is below:\nu - v\n-->", "_____no_output_____" ], [ "<hr>", "_____no_output_____" ], [ "Multiply the numpy array z with -2:", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\n\nz = np.array([2, 4])\nz * -2", "_____no_output_____" ] ], [ [ "Double-click __here__ for the solution.\n\n<!-- Your answer is below:\n-2 * z\n-->", "_____no_output_____" ], [ "<hr>", "_____no_output_____" ], [ "Consider the list <code>[1, 2, 3, 4, 5]</code> and <code>[1, 0, 1, 0, 1]</code>, and cast both lists to a numpy array then multiply them together:", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\nnp.array([1,2,3,4,5]) * np.array([1,0,1,0,1])", "_____no_output_____" ] ], [ [ "Double-click __here__ for the solution.\n\n<!-- Your answer is below:\na = np.array([1, 2, 3, 4, 5])\nb = np.array([1, 0, 1, 0, 1])\na * b\n-->", "_____no_output_____" ], [ "<hr>", "_____no_output_____" ], [ "Convert the list <code>[-1, 1]</code> and <code>[1, 1]</code> to numpy arrays <code>a</code> and <code>b</code>. Then, plot the arrays as vectors using the fuction <code>Plotvec2</code> and find the dot product:", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\na = np.array([-1,1])\nb = np.array([1,1])\nPlotvec2(a,b)\nnp.dot(a,b)", "_____no_output_____" ] ], [ [ "Double-click __here__ for the solution.\n\n<!-- Your answer is below:\na = np.array([-1, 1])\nb = np.array([1, 1])\nPlotvec2(a, b)\nprint(\"The dot product is\", np.dot(a,b))\n-->", "_____no_output_____" ], [ "<hr>", "_____no_output_____" ], [ "Convert the list <code>[1, 0]</code> and <code>[0, 1]</code> to numpy arrays <code>a</code> and <code>b</code>. 
Then, plot the arrays as vectors using the function <code>Plotvec2</code> and find the dot product:", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\na = np.array([1,0])\nb = np.array([0,1])\nPlotvec2(a,b)\nnp.dot(a,b)", "_____no_output_____" ] ], [ [ "Double-click __here__ for the solution.\n\n<!-- \na = np.array([1, 0])\nb = np.array([0, 1])\nPlotvec2(a, b)\nprint(\"The dot product is\", np.dot(a, b))\n -->", "_____no_output_____" ], [ "<hr>", "_____no_output_____" ], [ "Convert the list <code>[1, 1]</code> and <code>[0, 1]</code> to numpy arrays <code>a</code> and <code>b</code>. Then plot the arrays as vectors using the fuction <code>Plotvec2</code> and find the dot product:", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\na = np.array([1,1])\nb = np.array([0,1])\nPlotvec2(a,b)\nnp.dot(a,b)", "_____no_output_____" ] ], [ [ "Double-click __here__ for the solution.\n\n<!-- \na = np.array([1, 1])\nb = np.array([0, 1])\nPlotvec2(a, b)\nprint(\"The dot product is\", np.dot(a, b))\nprint(\"The dot product is\", np.dot(a, b))\n -->", "_____no_output_____" ], [ "<hr>", "_____no_output_____" ], [ "Why are the results of the dot product for <code>[-1, 1]</code> and <code>[1, 1]</code> and the dot product for <code>[1, 0]</code> and <code>[0, 1]</code> zero, but not zero for the dot product for <code>[1, 1]</code> and <code>[0, 1]</code>? <p><i>Hint: Study the corresponding figures, pay attention to the direction the arrows are pointing to.</i></p>", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\n\"Because the last ones are not perpendicular.\"", "_____no_output_____" ] ], [ [ "Double-click __here__ for the solution.\n\n<!-- \nThe vectors used for question 4 and 5 are perpendicular. As a result, the dot product is zero. \n-->", "_____no_output_____" ], [ "<hr>\n<h2>The last exercise!</h2>\n<p>Congratulations, you have completed your first lesson and hands-on lab in Python. However, there is one more thing you need to do. The Data Science community encourages sharing work. The best way to share and showcase your work is to share it on GitHub. By sharing your notebook on GitHub you are not only building your reputation with fellow data scientists, but you can also show it off when applying for a job. Even though this was your first piece of work, it is never too early to start building good habits. So, please read and follow <a href=\"https://cognitiveclass.ai/blog/data-scientists-stand-out-by-sharing-your-notebooks/\" target=\"_blank\">this article</a> to learn how to share your work.\n<hr>", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n<h2>Get IBM Watson Studio free of charge!</h2>\n <p><a href=\"https://cocl.us/bottemNotebooksPython101Coursera\"><img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/BottomAd.png\" width=\"750\" align=\"center\"></a></p>\n</div>", "_____no_output_____" ], [ "<h3>About the Authors:</h3> \n<p><a href=\"https://www.linkedin.com/in/joseph-s-50398b136/\" target=\"_blank\">Joseph Santarcangelo</a> is a Data Scientist at IBM, and holds a PhD in Electrical Engineering. His research focused on using Machine Learning, Signal Processing, and Computer Vision to determine how videos impact human cognition. 
Joseph has been working for IBM since he completed his PhD.</p>", "_____no_output_____" ], [ "Other contributors: <a href=\"www.linkedin.com/in/jiahui-mavis-zhou-a4537814a\">Mavis Zhou</a>", "_____no_output_____" ], [ "<hr>", "_____no_output_____" ], [ "<p>Copyright &copy; 2018 IBM Developer Skills Network. This notebook and its source code are released under the terms of the <a href=\"https://cognitiveclass.ai/mit-license/\">MIT License</a>.</p>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
c52e406f8efddd55964fd22f99ecdd32e08c3097
368,277
ipynb
Jupyter Notebook
scripts/experiment_loader.ipynb
hurwitzlab/b2_drought
581ee064a9821f5c2699370875aca66b5632235c
[ "MIT" ]
null
null
null
scripts/experiment_loader.ipynb
hurwitzlab/b2_drought
581ee064a9821f5c2699370875aca66b5632235c
[ "MIT" ]
null
null
null
scripts/experiment_loader.ipynb
hurwitzlab/b2_drought
581ee064a9821f5c2699370875aca66b5632235c
[ "MIT" ]
null
null
null
256.281837
5,145
0.699672
[ [ [ "import graphlab", "_____no_output_____" ], [ "#input files\ndir_csv=\"/Users/aponsero/Documents/UA_POSTDOC/projects/B2_project/MongoDB_ingest/test_passive_samplers/experiments\"\ndir_voc=\"/Users/aponsero/Documents/UA_POSTDOC/projects/B2_project/b2_drought/vocabulary\"\nfile_csv=dir_csv+\"/mapped_converted_VOC.csv\"\nvocabulary_file=dir_voc+\"/experiment_controlled_vocabulary.csv\"\ntypology_file=dir_voc+\"/experiment_typology.csv\"\n\n#output file\nout_dir=\"/Users/aponsero/Documents/UA_POSTDOC/projects/B2_project/MongoDB_ingest/test_passive_samplers/experiments\"\njson_file=out_dir+\"/experiment_leaves.json\"\nfout = open(json_file, \"a\")\n\n#experiment type\nmy_exp_type=\"VOCs\"", "_____no_output_____" ], [ "my_frame=graphlab.SFrame.read_csv(file_csv, column_type_hints=str)\nmy_frame", "_____no_output_____" ], [ "#minimum information checklist\nmy_typo=graphlab.SFrame.read_csv(typology_file, column_type_hints=str)\n\nminimum_tab=my_typo[(my_typo['Types'] == my_exp_type)]\nminimum_fields=minimum_tab['required_fields']\n\nfor x in minimum_fields:\n if x in my_frame.column_names():\n print(x+\": OK\")\n else :\n print(x+\" : not found\")\n\n\ncheck = all(elem in my_frame.column_names() for elem in minimum_fields)\n \nif check:\n print(\"The document passes the checklist\") \nelse :\n print(\"Warning : minimum attributes missing !\")", "_____no_output_____" ], [ "my_voc=graphlab.SFrame.read_csv(vocabulary_file, column_type_hints=str)\nmy_voc", "_____no_output_____" ], [ "# find the object categories and create type finder\nSpecimen_description=[]\nExperiment_description=[]\nResult=[]\nFile_list=[]\nNo_object=[]\nattribute_types={}\n\nfor i in my_frame.column_names():\n curr_data=my_voc[(my_voc['Term'] == i)]\n section=curr_data['Section_object']\n att_type=curr_data['Type']\n #add type in attribute_types\n attribute_types[i] = att_type[0]\n \n #add the attribute in the object categories\n if section[0] == \"Specimen_description\":\n Specimen_description.append(i)\n elif section[0] == \"Experiment_description\":\n Experiment_description.append(i)\n elif section[0] == \"Result\":\n Result.append(i)\n elif section[0] == \"File\":\n File_list.append(i)\n else:\n No_object.append(i)\n \nprint(Specimen_description)\nprint(Experiment_description)\nprint(Result)\nprint(File_list)\nprint(No_object)\n\nprint(attribute_types)", "['sample_ID', 'sample_type']\n['run_name', 'experiment_type', 'level', 'experiment_date-time', 'operator', 'protocol_ID']\n['CIS-3-HEXENOL_CAS', 'CIS-3-HEXENOL_RT_min', 'CIS-3-HEXENOL_match_factor', 'CIS-3-HEXENOL_area', 'tricyclene_CAS', 'tricyclene_RT_min', 'tricyclene_match_factor', 'tricyclene_area', 'ALPHA-PINENE_CAS', 'ALPHA-PINENE_RT_min', 'ALPHA-PINENE_match_factor', 'ALPHA-PINENE_area', 'TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS', 'TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min', 'TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor', 'TRANS-8-METHYLBICYCLO-NON-3-ENE_area', 'CAMPHENE_CAS', 'CAMPHENE_RT_min', 'CAMPHENE_match_factor', 'CAMPHENE_area', '2-BETA-PINENE_CAS', '2-BETA-PINENE_RT_min', '2-BETA-PINENE_match_factor', '2-BETA-PINENE_area', 'Linalool_CAS', 'Linalool_RT_min', 'Linalool_match_factor', 'Linalool_area', '3-Hexen-1-ol-acetate_CAS', '3-Hexen-1-ol-acetate_RT_min', '3-Hexen-1-ol-acetate_match_factor', '3-Hexen-1-ol-acetate_area', 'Benzene_CAS', 'Benzene_RT_min', 'Benzene_match_factor', 'Benzene_area', 'MT-1_RT_min', 'MT-1_area', 'ALPHA_TERPINENE_CAS', 'ALPHA_TERPINENE_RT_min', 'ALPHA_TERPINENE_match_factor', 'ALPHA_TERPINENE_area', 'PARA_CYMENE_CAS', 
'PARA_CYMENE_RT_min', 'PARA_CYMENE_match_factor', 'PARA_CYMENE_area', 'Bornylene_CAS', 'Bornylene_RT_min', 'Bornylene_match_factor', 'Bornylene_area', 'GAMMA_TERPINENE_CAS', 'GAMMA_TERPINENE_RT_min', 'GAMMA_TERPINENE_match_factor', 'GAMMA_TERPINENE_area', '1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS', '1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min', '1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor', '1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area', 'Acetic_acid_2-ethylhexyl_ester_CAS', 'Acetic_acid_2-ethylhexyl_ester_RT_min', 'Acetic_acid_2-ethylhexyl_ester_match_factor', 'Acetic_acid_2-ethylhexyl_ester_area', 'N-DECANAL_CAS', 'N-DECANAL_RT_min', 'N-DECANAL_match_factor', 'N-DECANAL_area', 'Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS', 'Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min', 'Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor', 'Bicyclo311hept-3-en-2-one_466-trimethyl-_area', '26-Dichlorostyrene_CAS', '26-Dichlorostyrene_RT_min', '26-Dichlorostyrene_match_factor', '26-Dichlorostyrene_area', 'Benzothiazole_CAS', 'Benzothiazole_RT_min', 'Benzothiazole_match_factor', 'Benzothiazole_area', 'Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS', 'Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min', 'Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor', 'Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area', 'Benzaldehyde_26-dichloro-_CAS', 'Benzaldehyde_26-dichloro-_RT_min', 'Benzaldehyde_26-dichloro-_match_factor', 'Benzaldehyde_26-dichloro-_area', '2H-Azepin-2-one_hexahydro-_CAS', '2H-Azepin-2-one_hexahydro-_RT_min', '2H-Azepin-2-one_hexahydro-_match_factor', '2H-Azepin-2-one_hexahydro-_area', '4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS', '4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min', '4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor', '4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area', 'beta-Cubebene_CAS', 'beta-Cubebene_RT_min', 'beta-Cubebene_match_factor', 'beta-Cubebene_area', 'EE-ALPHA-FARNESENE_CAS', 'EE-ALPHA-FARNESENE_RT_min', 'EE-ALPHA-FARNESENE_match_factor', 'EE-ALPHA-FARNESENE_area', '1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS', '1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min', '1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor', '1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area', '12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS', '12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min', '12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor', '12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area']\n['raw_data_file']\n[]\n{'tricyclene_area': 'num', 'tricyclene_CAS': 'string', '12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor': 'num', 'CAMPHENE_area': 'num', 'beta-Cubebene_match_factor': 'num', 'CIS-3-HEXENOL_area': 'num', 'protocol_ID': 'string', 'CAMPHENE_CAS': 'string', 'TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor': 'num', 'Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor': 'num', 'CAMPHENE_match_factor': 'num', 'Benzene_RT_min': 'num', '2H-Azepin-2-one_hexahydro-_match_factor': 'num', 'PARA_CYMENE_match_factor': 'num', 'ALPHA-PINENE_match_factor': 'num', '12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min': 'num', 'level': 'string', 'GAMMA_TERPINENE_RT_min': 'num', 'Acetic_acid_2-ethylhexyl_ester_CAS': 'string', 'ALPHA-PINENE_CAS': 'string', '4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min': 'num', 'TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min': 'num', 
'2-BETA-PINENE_CAS': 'string', '2-BETA-PINENE_RT_min': 'num', '1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor': 'num', 'Linalool_area': 'num', 'raw_data_file': 'string', 'CIS-3-HEXENOL_CAS': 'string', '26-Dichlorostyrene_area': 'num', '1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS': 'string', '26-Dichlorostyrene_CAS': 'string', 'Linalool_match_factor': 'num', 'GAMMA_TERPINENE_match_factor': 'num', '4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area': 'num', 'run_name': 'string', '1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min': 'num', '3-Hexen-1-ol-acetate_RT_min': 'num', '12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area': 'num', 'N-DECANAL_RT_min': 'num', 'sample_type': 'string', 'ALPHA-PINENE_area': 'num', 'Benzaldehyde_26-dichloro-_match_factor': 'num', 'PARA_CYMENE_RT_min': 'num', '4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor': 'num', 'Bornylene_match_factor': 'num', '4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS': 'string', 'CAMPHENE_RT_min': 'num', 'Benzaldehyde_26-dichloro-_area': 'num', 'tricyclene_match_factor': 'num', 'PARA_CYMENE_CAS': 'string', '3-Hexen-1-ol-acetate_CAS': 'string', 'beta-Cubebene_CAS': 'string', 'Benzothiazole_area': 'num', 'Benzaldehyde_26-dichloro-_CAS': 'string', '3-Hexen-1-ol-acetate_area': 'num', 'EE-ALPHA-FARNESENE_CAS': 'string', 'experiment_type': 'string', '1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor': 'num', '1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS': 'string', 'Linalool_RT_min': 'num', 'Benzothiazole_CAS': 'string', 'ALPHA_TERPINENE_match_factor': 'num', 'operator': 'string', 'ALPHA_TERPINENE_CAS': 'string', 'Benzothiazole_match_factor': 'num', 'Bornylene_CAS': 'string', 'experiment_date-time': 'date-time', 'TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS': 'string', 'MT-1_RT_min': 'num', 'tricyclene_RT_min': 'num', 'Acetic_acid_2-ethylhexyl_ester_RT_min': 'num', 'Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor': 'num', 'ALPHA_TERPINENE_area': 'num', 'beta-Cubebene_RT_min': 'num', '2H-Azepin-2-one_hexahydro-_area': 'num', '26-Dichlorostyrene_match_factor': 'num', 'Benzene_area': 'num', 'sample_ID': 'string', 'Benzothiazole_RT_min': 'num', 'Benzaldehyde_26-dichloro-_RT_min': 'num', 'Bicyclo311hept-3-en-2-one_466-trimethyl-_area': 'num', 'Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area': 'num', 'Benzene_match_factor': 'num', 'Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min': 'num', 'Acetic_acid_2-ethylhexyl_ester_match_factor': 'num', 'EE-ALPHA-FARNESENE_RT_min': 'num', 'ALPHA-PINENE_RT_min': 'num', '3-Hexen-1-ol-acetate_match_factor': 'num', 'N-DECANAL_CAS': 'string', '2-BETA-PINENE_area': 'num', 'EE-ALPHA-FARNESENE_match_factor': 'num', 'CIS-3-HEXENOL_match_factor': 'num', 'Bornylene_RT_min': 'num', 'beta-Cubebene_area': 'num', 'Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS': 'string', 'MT-1_area': 'num', 'PARA_CYMENE_area': 'num', 'TRANS-8-METHYLBICYCLO-NON-3-ENE_area': 'num', '1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min': 'num', 'GAMMA_TERPINENE_CAS': 'string', 'EE-ALPHA-FARNESENE_area': 'num', '2H-Azepin-2-one_hexahydro-_RT_min': 'num', 'Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS': 'string', 'Acetic_acid_2-ethylhexyl_ester_area': 'num', 'Linalool_CAS': 'string', '2H-Azepin-2-one_hexahydro-_CAS': 'string', '1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area': 'num', 'Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min': 'num', 'ALPHA_TERPINENE_RT_min': 'num', 'N-DECANAL_area': 'num', 
'1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area': 'num', '12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS': 'string', 'GAMMA_TERPINENE_area': 'num', 'CIS-3-HEXENOL_RT_min': 'num', 'N-DECANAL_match_factor': 'num', '26-Dichlorostyrene_RT_min': 'num', 'Bornylene_area': 'num', 'Benzene_CAS': 'string', '2-BETA-PINENE_match_factor': 'num'}\n" ], [ "nb_samples=my_frame.shape[0]\n\nfor i in range(0,nb_samples):\n curr_slice=my_frame[i]\n line=\"{\"\n #process the specimen description\n for i in range(len(Specimen_description)):\n Key=Specimen_description[i]\n if str(curr_slice[Specimen_description[i]])==\"NA\" or str(curr_slice[Specimen_description[i]])==\"None\":\n attribute=\"\\\"NA\\\"\"\n else:\n if attribute_types[Key]==\"int\" or attribute_types[Key]==\"float\" or attribute_types[Key]==\"num\":\n attribute=str(curr_slice[Specimen_description[i]])\n elif attribute_types[Key]==\"date-time\":\n attribute=\"{$date:\\\"\"+str(curr_slice[Specimen_description[i]])+\"\\\"}\"\n else:\n attribute=\"\\\"\"+str(curr_slice[Specimen_description[i]])+\"\\\"\"\n \n line=line+\"\\\"\"+Key+\"\\\":\"+attribute+\",\"\n\n #print(\"#### \"+line)\n \n #process the Experiment description\n for i in range(len(Experiment_description)):\n Key=Experiment_description[i]\n if str(curr_slice[Experiment_description[i]])==\"NA\" or str(curr_slice[Experiment_description[i]])==\"None\":\n attribute=\"\\\"NA\\\"\"\n else:\n if attribute_types[Key]==\"int\" or attribute_types[Key]==\"float\" or attribute_types[Key]==\"num\":\n attribute=str(curr_slice[Experiment_description[i]])\n elif attribute_types[Key]==\"date-time\":\n attribute=\"{$date:\\\"\"+str(curr_slice[Experiment_description[i]])+\"\\\"}\"\n else:\n attribute=\"\\\"\"+str(curr_slice[Experiment_description[i]])+\"\\\"\"\n \n line=line+\"\\\"\"+Key+\"\\\":\"+attribute+\",\"\n\n #process the Result\n line=line+\"\\\"Result\\\":{\"\n for i in range(len(Result)):\n Key=Result[i]\n if str(curr_slice[Result[i]])==\"NA\" or str(curr_slice[Result[i]])==\"None\":\n attribute=\"\\\"NA\\\"\"\n else:\n if attribute_types[Key]==\"int\" or attribute_types[Key]==\"float\" or attribute_types[Key]==\"num\":\n attribute=str(curr_slice[Result[i]])\n elif attribute_types[Key]==\"date-time\":\n attribute=\"{$date:\\\"\"+str(curr_slice[Result[i]])+\"\\\"}\"\n else:\n attribute=\"\\\"\"+str(curr_slice[Result[i]])+\"\\\"\"\n \n line=line+\"\\\"\"+Key+\"\\\":\"+attribute\n \n if i==len(Result)-1:\n line=line+\"},\"\n else:\n line=line+\",\"\n \n #process file list\n line=line+\"\\\"files\\\":{\"\n for i in range(len(File_list)):\n Key=File_list[i]\n if str(curr_slice[File_list[i]])==\"NA\" or str(curr_slice[File_list[i]])==\"None\":\n attribute=\"\\\"NA\\\"\"\n else:\n if attribute_types[Key]==\"int\" or attribute_types[Key]==\"float\" or attribute_types[Key]==\"num\":\n attribute=str(curr_slice[File_list[i]])\n elif attribute_types[Key]==\"date-time\":\n attribute=\"{$date:\\\"\"+str(curr_slice[File_list[i]])+\"\\\"}\"\n else:\n attribute=\"\\\"\"+str(curr_slice[File_list[i]])+\"\\\"\"\n \n line=line+\"\\\"\"+Key+\"\\\":\"+attribute\n \n if i==len(File_list)-1:\n line=line+\"}\"\n else:\n line=line+\",\"\n\n \n line=line+\"}\"\n fout.write(line+\"\\n\")\n print(line)\n ", "{\"sample_ID\":\"b2_pd_1\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe-011\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-05T10:21:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":27948.34,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":60540.06,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":1669914.77,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":1869.85,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":979199.69,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":103929.6,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":49863.87,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":25458.47,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":975698.66,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":6480.29,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":1092554.96,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":422095.28,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":88979.7,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":14034.45,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":165875.45,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":62869.02,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":18417.41,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":90260.44,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":48829.01,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":38168.44,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":332372.82,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":118357.08,\"2H
-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":151651.79,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":48835.58,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":6799.82,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":10629.02,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":50432.46,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":100019.68},\"files\":{\"raw_data_file\":\"probe-011.D\"}}\n{\"sample_ID\":\"b2_pd_2\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe-012\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-05T11:44:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":1262.43,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":3135.57,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":149743.5,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":1025.06,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":5913.59,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":12981.33,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":1593.32,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":2370.3,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":6771.9,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":913.38,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":4650.82,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":5395.38,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":872.2,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chloropheny
l-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":25624.57,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":1930.55,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":4564.72,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":119.79,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":32694.29,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":22214.89,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":21920.06,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":48140.23,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":12592444.99,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":56233.1,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":6764.69,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":3252.02,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":99992.85,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":59736.07},\"files\":{\"raw_data_file\":\"probe-012.D\"}}\n{\"sample_ID\":\"b2_pd_3\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe-013\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-05T13:08:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":70.41,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":923.49,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":24643.09,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":2663.42,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":1506.87,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":9369.17,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":0,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":1582.71,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":2082.89,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":1176.53,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":2870.67,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":4999.54,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":1128.2,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":25718.72,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":2458.11,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":2898.2,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":107.64,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":33313.26,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":19473.93,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":1411957.72,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":57423.55,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60
-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":18531271.32,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":67688.23,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":6113.41,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":5292.78,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":117407.07,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":65654.66},\"files\":{\"raw_data_file\":\"probe-013.D\"}}\n{\"sample_ID\":\"b2_pd_4\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe-014\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-05T14:32:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":0,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":15529.96,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":597085.51,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":5568.53,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":39982.51,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":65089.52,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":22735.09,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":2532.49,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":115095.94,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":3052.91,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":11282.08,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":9273.21,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":3371.48,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\
":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":31369.64,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":2648.25,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":1413.5,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":104.25,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":47573.88,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":32476.24,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":310006.65,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":57882.99,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":13053082.5,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":80484.53,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":7830.08,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":5085.69,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":179674.25,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":132588.56},\"files\":{\"raw_data_file\":\"probe-014.D\"}}\n{\"sample_ID\":\"b2_pd_5\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe-015\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-05T15:55:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":0,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":18211.67,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":544561.84,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":5770.64,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":38580.52,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":67522.49,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":17547.03,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":2565.76,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":101507.02,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":3933.45,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":10202.63,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":13047.5,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":3045.78,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":36532.47,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":5232.88,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":1668.24,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":810.32,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":38233.46,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":18041.39,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":1362060.44,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":106789.65,\"2H-Azepin-2-one_hexahydro-_
CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":20962646.81,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":68518.88,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":5685.76,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":1750.37,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":142875.16,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":72360.1},\"files\":{\"raw_data_file\":\"probe-015.D\"}}\n{\"sample_ID\":\"b2_pd_6\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe-016\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-05T17:19:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":0,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":0,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":29671.12,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":3150.28,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":1023.56,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":14702.64,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":0,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":2613.1,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":1721.02,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":1399.32,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":3144.33,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":5292.12,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":1312.64,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37
898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":27776.14,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":790.93,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":5263.84,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":156.04,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":39556.72,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":27758.46,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":690579.35,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":66168.54,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":13566491.99,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":74139.65,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":6909.59,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":3799.94,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":132226.02,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":78664.37},\"files\":{\"raw_data_file\":\"probe-016.D\"}}\n{\"sample_ID\":\"b2_pd_7\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe-017\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-05T18:42:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":0,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":10631.65,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":545640.38,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":3288.83,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":19765.92,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":26276.8,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":3547.48,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":2482.21,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":41567.39,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":1954.91,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":7307.34,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":14071.88,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":2114.58,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":26110.73,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":2217.72,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":1405.64,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":308.41,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":43446.68,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":25576.45,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":1000019.05,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":54840.14,\"2H-Azepin-2-one_hexahydro-_CAS\
":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":13450573.34,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":74884.94,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":6306.79,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":2516.1,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":205129.74,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":122743.88},\"files\":{\"raw_data_file\":\"probe-017.D\"}}\n{\"sample_ID\":\"b2_pd_8\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe-018\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-05T20:06:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":63.47,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":44968.9,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":1295541.99,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":14192.36,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":104150.57,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":113150.76,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":32417.04,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":1704.06,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":96896.76,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":0,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":15372.18,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":23914.68,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":2882.77,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinyleth
ene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":22674.65,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":2719.5,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":2688.2,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":612.94,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":30729.79,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":22506.78,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":499037.37,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":68839.6,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":11125024.45,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":72439.85,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":5151.85,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":3073.7,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":171547.55,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":117573.25},\"files\":{\"raw_data_file\":\"probe-018.D\"}}\n{\"sample_ID\":\"b2_pd_9\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe-019\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-05T21:30:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":0,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":4987.19,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":142865.64,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":1559.07,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":10075.55,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":25382.23,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":2845.42,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":2401.47,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":12795.55,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":3145.02,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":5440.46,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":9966.65,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":1669.81,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":27721.86,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":4092.94,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":2955.53,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":0,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":34371.71,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":17690.73,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":932528.61,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":101765.22,\"2H-Azepin-2-one_hexahydro-_CAS\":\"10
5-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":17063520.14,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":68498.82,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":5853.19,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":3274.91,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":135978.11,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":110790.51},\"files\":{\"raw_data_file\":\"probe-019.D\"}}\n{\"sample_ID\":\"b2_pd_10\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe-020\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-05T22:53:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":20.91,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":3336.38,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":60882.95,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":659.72,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":5932.58,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":7734.42,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":1512.1,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":439.56,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":4277.31,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":355.03,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":2588.22,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":2501.12,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":729.23,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":3
1.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":5757.01,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":553.26,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":2044.27,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":0,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":5487.83,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":6733.68,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":119025.1,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":17657.48,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":905885.51,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":12262.39,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":1356.11,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":144.58,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":18553.14,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":7971.15},\"files\":{\"raw_data_file\":\"probe-020.D\"}}\n{\"sample_ID\":\"b2_pd_11\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_021\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-08T01:58:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":94.85,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":15690.29,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":194942.53,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":2288.67,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":21466.56,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":20356.6,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":9173.53,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":3245.16,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":39996.94,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":2498.11,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":8606.32,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":7748.33,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":2645.63,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":50006.62,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":5321.13,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":30979.04,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":2905.31,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":39008.16,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":40474.84,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":13481.87,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":76960.13,\"2H-Azepin-2-one_hexahydro-_C
AS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":8521091.41,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":49169.44,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":8411.65,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":6481.29,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":48426.24,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":62541},\"files\":{\"raw_data_file\":\"probe_021.D\"}}\n{\"sample_ID\":\"b2_pd_12\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_022\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-08T03:21:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":165.66,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":35755.01,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":1271712.62,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":13151.31,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":89760.81,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":114618.74,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":43554.26,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":2037.77,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":200899.34,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":0,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":28543.88,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":27555.39,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":3924.92,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylet
hene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":43331.63,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":2138.89,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":20810,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":2886.42,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":33489.84,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":35798.32,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":7795.96,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":63382.65,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":6992200.07,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":51915.3,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":8110.54,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":4008.04,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":42818.24,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":39673.16},\"files\":{\"raw_data_file\":\"probe_022.D\"}}\n{\"sample_ID\":\"b2_pd_13\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_023\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-08T04:45:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":219.24,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":46354.2,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":1859685.7,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":16344.18,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":128879.01,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":146242.25,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":65983.64,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":2285.7,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":160526.25,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":0,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":26843.96,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":109355.92,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":7656.07,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":20645.49,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":7252.64,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":34442.36,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":12170.03,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":19214.41,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":49700.37,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":575757.54,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":29539.35,\"2H-Azepin-2-one_hexahydr
o-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":9102753.81,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":47889.36,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":9039.49,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":6250.73,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":64112.92,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":38514.51},\"files\":{\"raw_data_file\":\"probe_023.D\"}}\n{\"sample_ID\":\"b2_pd_14\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_024\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-08T06:08:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":827.11,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":11523.6,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":558539.44,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":3924.61,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":28827.12,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":78520.5,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":19237.16,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":2261.39,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":61159.52,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":0,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":12617.36,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":29010.95,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":2081.82,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinyle
thene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":30375.54,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":9038.28,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":38453.66,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":13735.77,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":14253.46,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":54589.33,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":723479.86,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":25463.46,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":12627696.54,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":37409.25,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":8872.33,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":4478.77,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":30164.73,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":46613.95},\"files\":{\"raw_data_file\":\"probe_024.D\"}}\n{\"sample_ID\":\"b2_pd_15\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_025\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-08T07:32:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":355.1,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":26675.38,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":1050540.5,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":11185.84,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":79270.97,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":89491.41,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":38930.46,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":5425.65,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":165391.66,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":0,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":22164.83,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":23350.32,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":2866.12,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":22120.8,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":15932.07,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":56763.67,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":19757.61,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":14247.62,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":71642.89,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":744205.24,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":28019.18,\"2H-Azepin-2-one_hexahydro-
_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":9864730.67,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":38435.33,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":10342.09,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":20847.5,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":24196.22,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":37668.07},\"files\":{\"raw_data_file\":\"probe_025.D\"}}\n{\"sample_ID\":\"b2_pd_16\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_026\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-08T08:55:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":811.07,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":2040.71,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":35683.3,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":256.84,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":7824.13,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":9940.35,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":3521.27,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":4212.15,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":5577.7,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":0,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":9094.44,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":21660.37,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":0,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":
31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":46762.85,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":18044.2,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":67307.45,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":20101.4,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":19823.02,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":71793.5,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":671731.8,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":32210.98,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":10539494.58,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":23992.16,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":9450.92,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":8735.51,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":9156.81,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":25230.37},\"files\":{\"raw_data_file\":\"probe_026.D\"}}\n{\"sample_ID\":\"b2_pd_17\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_027\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-08T10:19:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":16349.59,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":81944.23,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":1193057.2,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":103051.44,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":1507133.72,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":69382.05,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":28519.58,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":18330.13,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":701042.39,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":4977.64,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":462886.6,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":320409.04,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":38003.73,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":6692.53,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":43803.38,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":30127.59,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":33945.73,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":24310.63,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":18033.22,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":48452.1,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":5744.3,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":26708.35,\"2H-Azepi
n-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":8443042.26,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":36482.78,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":8193.75,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":6852.56,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":38165.63,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":23588.59},\"files\":{\"raw_data_file\":\"probe_027.D\"}}\n{\"sample_ID\":\"b2_pd_18\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_028\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-08T11:43:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":185.12,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":31323.81,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":458017.75,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":42240.89,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":60266.43,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":44425.35,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":32055.65,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":3786.64,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":156624.11,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":740.99,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":1496.93,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":24381.23,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":19978.71,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":5378.74,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho
-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":31894.93,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":7294.07,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":30481.99,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":13011.74,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":23785.54,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":43833.44,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":789161.63,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":26526.2,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":8601747.21,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":44088.79,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":6770.14,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":9429.69,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":36994.85,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":27830.82},\"files\":{\"raw_data_file\":\"probe_028.D\"}}\n{\"sample_ID\":\"b2_pd_19\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_029\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-08T13:06:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":176747.65,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":195353.28,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":2064927.92,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":191779.7,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":4975161.83,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":64792.56,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":27395.42,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":85047.74,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":2839857.12,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":20060.54,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":2149798.18,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":1419326.68,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":78977.4,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":26737.93,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":238967.07,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":128332.55,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":31174.34,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":154560.55,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":14271.28,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":50626.08,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":403967.56,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":50720
.05,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":4613804.74,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":41286.54,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":6843.13,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":6469.8,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":54220.23,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":226886.88},\"files\":{\"raw_data_file\":\"probe_029.D\"}}\n{\"sample_ID\":\"b2_pd_20\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_030\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-08T14:30:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":97758.44,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":116236.92,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":1409404.74,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":114681.87,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":2637490.89,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":53942.41,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":20573.8,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":44286.01,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":1307756.38,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":13916.85,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":918260.89,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":673487.39,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":70434.43,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":14210.84,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69
737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":66661.85,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":34945.07,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":30496.84,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":75838.47,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":18165.58,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":27002.36,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":756011.34,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":24907.36,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":7317971.52,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":34039.92,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":4649.53,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":8927.2,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":70509.39,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":50940.75},\"files\":{\"raw_data_file\":\"probe_030.D\"}}\n{\"sample_ID\":\"b2_pd_21\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_031\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-08T15:54:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":0,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":1108.34,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":17891.59,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":122.76,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":6123.38,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":8771.94,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":1437.63,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":4572.5,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":2038.5,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":1085.46,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":8628.13,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":9769.08,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":1296.23,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":18282.5,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":6072.68,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":29979.25,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":8030.27,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":11127.93,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":20567.81,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":31931.61,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":26739.19,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-6
0-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":7105161.97,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":33320.35,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":3522.82,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":16051.4,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":97877.25,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":57868.66},\"files\":{\"raw_data_file\":\"probe_031.D\"}}\n{\"sample_ID\":\"b2_pd_22\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_036\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-08T17:17:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":0,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":0,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":13449.7,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":88.59,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":1025.86,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":5276.86,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":0,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":1602.19,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":1632.35,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":3498.13,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":2880.93,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":3639.21,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":51.1,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-
ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":46826.63,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":3853.38,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":17794.17,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":2726.8,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":42231.39,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":45237.85,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":18943.26,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":54073.96,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":6439490.16,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":26707.65,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":7256.66,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":4489.57,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":6833.26,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":79817.61},\"files\":{\"raw_data_file\":\"probe_036.D\"}}\n{\"sample_ID\":\"b2_pd_23\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_037\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-08T18:41:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":963.74,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":1535.66,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":20878.11,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":110.47,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":25124.79,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":5057.94,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":0,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":1804.85,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":9603.65,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":13429.86,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":6439.29,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":2910.55,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":48.01,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":40331.16,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":2757.48,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":22732.72,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":1924.8,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":47941.51,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":33992.32,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":21881.76,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":66390.07,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-
60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":8689081.39,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":44140.75,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":6819.33,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":4978.83,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":17595.88,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":61354.56},\"files\":{\"raw_data_file\":\"probe_037.D\"}}\n{\"sample_ID\":\"b2_pd_24\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_038\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-08T20:04:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":89.85,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":0,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":13986.31,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":1552.19,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":7264.35,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":5291.91,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":317.49,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":1885.67,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":8102.64,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":4720.63,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":5498.5,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":3412.09,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":672.5,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151
,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":46745.91,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":1456.82,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":19207.92,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":2565.1,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":38502.88,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":33842.72,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":47447.8,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":87363.62,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":7277617.25,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":35816.62,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":6788.09,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":5348.68,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":6595.75,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":79452.84},\"files\":{\"raw_data_file\":\"probe_038.D\"}}\n{\"sample_ID\":\"b2_pd_25\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_039\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-08T21:28:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":72.34,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":0,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":16036.89,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":1731.77,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":742.89,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":6341.05,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":2404.7,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":7827.47,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":816.89,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":0,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":3286.53,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":6378.84,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":0,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":18468.57,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":6411.78,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":31101.19,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":10682.36,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":16724.65,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":43895.07,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":557713.36,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":25167.21,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-
Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":9871345.21,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":44388.42,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":6793.43,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":26745.71,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":26198.28,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":48626.92},\"files\":{\"raw_data_file\":\"probe_039.D\"}}\n{\"sample_ID\":\"b2_pd_26\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_040\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-08T22:52:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":31.03,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":0,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":29677.97,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":2174.72,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":2722.32,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":11502.67,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":1757.04,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":4391.33,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":1725.35,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":0,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":4876.51,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":9941.32,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":0,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho
-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":31735.63,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":3788.3,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":30019.63,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":7471.14,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":35218.93,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":44289.48,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":731021.73,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":51581.15,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":6671348.77,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":48937.19,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":6696.66,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":11036.43,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":48191.56,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":126100.53},\"files\":{\"raw_data_file\":\"probe_040.D\"}}\n{\"sample_ID\":\"b2_pd_27\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_041\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-09T00:15:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":107.95,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":0,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":41099.09,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":431.62,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":6027.4,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":5716.11,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":2553,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":1436.74,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":2574.25,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":1616.15,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":3000.56,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":6745.72,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":0,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":17877.59,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":16133.09,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":51197.5,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":14260.1,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":23454.76,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":59131.47,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":18410.02,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":32296.13,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"
2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":5188481.47,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":34872.18,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":7638.77,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":4705.89,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":10824.39,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":29640.35},\"files\":{\"raw_data_file\":\"probe_041.D\"}}\n{\"sample_ID\":\"b2_pd_28\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_042\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-09T01:39:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":295.03,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":0,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":16448.43,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":85.98,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":1207.61,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":3457.59,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":854.45,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":2738.68,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":2173.61,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":2601.38,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":2417.17,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":5116.83,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":952.07,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Ami
no-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":33614.4,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":2692.86,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":19464.48,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":3412.61,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":33092.91,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":34019.01,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":28217.68,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":45676.3,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":7040678.45,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":48055.12,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":6710.97,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":4542.67,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":30078.76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":64135.72},\"files\":{\"raw_data_file\":\"probe_042.D\"}}\n{\"sample_ID\":\"b2_pd_29\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_043\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-09T03:02:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":540.69,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":0,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":23050.83,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":157.2,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":2717.74,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":5989.15,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":0,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":2483.87,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":1244.9,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":1664.64,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":2648.49,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":3368.73,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":802.08,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":30059.8,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":4598.55,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":19090.58,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":1742.25,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":28420.4,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":20133.12,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":703197.06,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":61933.64,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"
2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":8449963.83,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":39999.38,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":5036.96,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":2969.42,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":33574.47,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":43765.57},\"files\":{\"raw_data_file\":\"probe_043.D\"}}\n{\"sample_ID\":\"b2_pd_30\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_044\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-09T04:26:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":352.27,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":0,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":17250.75,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":1685.84,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":1927.41,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":6078.42,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":410.94,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":3298.46,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":2182.1,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":3753.31,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":3917.35,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":3514.01,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":1432.46,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-A
mino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":45309.71,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":4107.33,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":24737.13,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":2245.81,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":42085.52,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":30286.68,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":177715.03,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":119557.52,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":8530615.74,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":45090.55,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":6368.71,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":1084.74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":20940.25,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":75641.03},\"files\":{\"raw_data_file\":\"probe_044.D\"}}\n{\"sample_ID\":\"b2_pd_31\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_045\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-09T05:49:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":102.49,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":0,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":19274.64,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":97.63,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":1171.3,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":4135.97,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":0,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":2135.44,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":1672.58,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":2152.98,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":3276.29,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":3585.72,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":1366.77,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":30339.76,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":2021.05,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":19471.06,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":2060.52,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":34673.15,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":29841.61,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":6086.93,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":69243.48,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\
"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":8071287.1,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":42867.29,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":6234.88,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":1234.15,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":20619.37,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":47544.16},\"files\":{\"raw_data_file\":\"probe_045.D\"}}\n{\"sample_ID\":\"b2_pd_32\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_047\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-09T13:32:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":357.23,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":0,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":20315.81,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":1513.91,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":1287.04,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":11311.45,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":0,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":3411.59,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":1485.62,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":3467.44,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":4359.68,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":2825.14,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":1367.89,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amin
o-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":54105.75,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":7318.3,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":24168.69,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":2218.69,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":39188.69,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":36040.19,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":2593.91,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":113504.42,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":6399108.12,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":45502.9,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":6002.4,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":1698.93,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":25791.2,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":141152.15},\"files\":{\"raw_data_file\":\"probe_047.D\"}}\n{\"sample_ID\":\"b2_pd_33\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_048\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-09T14:56:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":28415.53,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":43266.38,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":459130.66,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":41974.3,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":1248442.22,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":13455.87,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":0,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":17698.04,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":585075.01,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":3824.35,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":688917.22,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":227095,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":13857.21,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":5678.92,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":42922.82,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":5437.18,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":24669.77,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":32870.14,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":43104,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":26353.66,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":7820.24,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":96102.79,\"2H-Azepin-2-one_hexah
ydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":7044310.79,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":39438.1,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":5413.1,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":3310.56,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":27048.18,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":97780.03},\"files\":{\"raw_data_file\":\"probe_048.D\"}}\n{\"sample_ID\":\"b2_pd_34\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_049\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-09T16:20:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":543326.68,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":235038.35,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":2574977.9,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":238754.13,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":7389753.72,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":81047.3,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":32277.93,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":213186.06,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":6106498.83,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":56196.83,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":7722274.43,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":2504928.38,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":147941.77,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":71505.7,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ort
ho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":302826.85,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":171479.9,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":31588.77,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":513682.2,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":57297.56,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":56984.03,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":33309.39,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":75757.75,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":3375364.77,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":37493.25,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":6115.66,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":8641.82,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":49389.4,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":106689.05},\"files\":{\"raw_data_file\":\"probe_049.D\"}}\n{\"sample_ID\":\"b2_pd_35\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_050\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-09T17:43:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":300.14,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":3878.41,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":40437.36,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":607.27,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":67934.95,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":6389.07,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":0,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":4293.95,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":34404.2,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":37575.45,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":17668.99,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":3446.76,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":1600.4,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":34022.66,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":3287.36,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":23421.37,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":3078.14,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":44223.75,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":27033.01,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":402159.27,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":82511.08,\"2H-Azepin-2-one_hexahydro-_CAS\":\"
105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":5755760.41,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":36386.16,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":4950.55,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":3479.74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":53250.47,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":76055.92},\"files\":{\"raw_data_file\":\"probe_050.D\"}}\n{\"sample_ID\":\"b2_pd_36\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_051\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-09T19:07:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":78.45,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":0,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":13910.65,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":1456.65,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":1117.36,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":4910.43,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":0,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":2494.54,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":471.56,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":3984.42,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":3040.26,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":2935.69,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":1149.07,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.3789815
1,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":30099,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":2226.98,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":20992.95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":1584.06,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":33509.39,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":17461.5,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":299039.12,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":80677.2,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":2889718.19,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":33302.32,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":2802.12,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":4817.25,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":84612.69,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":69678.66},\"files\":{\"raw_data_file\":\"probe_051.D\"}}\n{\"sample_ID\":\"b2_pd_37\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_055\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-09T20:31:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":74.46,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":0,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":20619.64,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":2017.87,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":1910.59,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":8372.74,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":0,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":1832.2,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":450.6,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":2601.56,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":3519.38,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":5150.75,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":0,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":33464.71,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":3103.39,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":11833.88,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":1291.94,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":25687.44,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":20134.76,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":609845.32,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":50094.17,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-A
zepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":5804513.4,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":47979.89,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":3670.61,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":3717.32,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":24513.33,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":60074},\"files\":{\"raw_data_file\":\"probe_055.D\"}}\n{\"sample_ID\":\"b2_pd_38\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_056\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-09T21:54:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":328.38,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":0,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":19827.48,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":103.57,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":732.75,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":6859.68,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":0,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":3488.14,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":554.95,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":2250.23,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":3110.46,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":3146.36,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":1171.05,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-ch
lorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":38946.82,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":4089.15,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":17772.88,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":1308.29,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":35164.63,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":32977.67,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":350059.73,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":43925.29,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":3358793.38,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":53957.85,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":4582.36,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":2698.08,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":17978.01,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":83522.28},\"files\":{\"raw_data_file\":\"probe_056.D\"}}\n{\"sample_ID\":\"b2_pd_39\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_057\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-09T23:18:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":88003.31,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":117688.08,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":1153071.44,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":99977.72,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":2853592.62,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":32087.31,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":3874.53,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":22435.66,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":1605018.22,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":9052.36,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":1852255.6,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":634677.04,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":41162.43,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":14738.96,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":130857.3,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":30950.33,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":22174.99,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":39671.71,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":37211.57,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":41532.59,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":2519.9,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":62811.39,\"2H-A
zepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":7730691.57,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":48375.89,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":6883.94,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":664.25,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":16188.25,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":95139.82},\"files\":{\"raw_data_file\":\"probe_057.D\"}}\n{\"sample_ID\":\"b2_pd_40\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_058\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-10T00:42:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":369.98,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":7692.15,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":81197.49,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":7694.63,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":116261.04,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":9432.91,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":1309.39,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":4425.84,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":63972.27,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":66179.41,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":31972.67,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":5347.42,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":2212.8,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorop
henyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":44667.99,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":7776.45,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":22393.37,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":1075.37,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":38214.11,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":30917.63,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":460504.73,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":82029.77,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":5687471.85,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":38240.23,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":4720.86,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":2419.65,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":19004.02,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":148371.54},\"files\":{\"raw_data_file\":\"probe_058.D\"}}\n{\"sample_ID\":\"b2_pd_41\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_059\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-10T02:05:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":178.03,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":0,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":58992.9,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":6631.39,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":3824.27,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":11644.07,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":1627.03,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":3036.86,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":1251.47,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":3888.7,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":3166.36,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":5057.55,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":1477.11,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":42711.65,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":2985.66,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":24980.66,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":1626.07,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":46535.15,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":41965.98,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":326418.62,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":73747.43,\"2H-Azepin-2-one_hexahydro-_CAS\":\"10
5-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":4797040.92,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":47553.65,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":5634.64,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":1005.08,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":50204.12,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":199479.92},\"files\":{\"raw_data_file\":\"probe_059.D\"}}\n{\"sample_ID\":\"b2_pd_42\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_060\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-10T03:29:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":104.51,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":0,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":19516.63,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":1537.73,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":1763.81,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":5439.98,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":0,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":3643.49,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":1452.71,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":2220.13,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":3851.01,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":4061.42,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":1162.33,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.378981
51,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":42451.46,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":3693.99,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":18946.3,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":1416.23,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":34447.1,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":31465.89,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":462582.19,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":79137.95,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":5037668.39,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":41800.83,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":4335.19,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":1168.03,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":23489.68,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":117086.61},\"files\":{\"raw_data_file\":\"probe_060.D\"}}\n{\"sample_ID\":\"b2_pd_43\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_061\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-10T04:52:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":49.82,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":0,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":24870.3,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":1084.34,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":1716.77,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":4813.63,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":0,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":2461.09,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":1823.48,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":1340.87,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":3463.43,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":5246.61,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":891.12,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":24462.54,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":2517.21,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":11826.75,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":1376.02,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":27660.63,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":25732.87,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":411701.52,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":45541.84,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\"
,\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":5327409.02,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":37614.07,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":4118.44,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":1307.64,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":33055.27,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":66451.36},\"files\":{\"raw_data_file\":\"probe_061.D\"}}\n{\"sample_ID\":\"b2_pd_44\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_062\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-10T06:16:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":29859.92,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":51885.95,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":546996.49,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":47250.72,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":1291261.43,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":22322.8,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":2973.78,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":8175.91,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":635710.49,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":2712.68,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":714937.35,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":245532.62,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":21902.26,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":6190.66,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinyle
thene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":63115.73,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":15240.39,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":13163.77,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":7136.42,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":25094.64,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":23202.38,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":391332.73,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":64960.39,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":3814031.3,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":40309.72,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":3886.64,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":1761.55,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":29996.27,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":72640.12},\"files\":{\"raw_data_file\":\"probe_062.D\"}}\n{\"sample_ID\":\"b2_pd_45\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_063\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-10T07:39:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":6040.4,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":18534.41,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":185026.02,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":14885.03,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":387698.1,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":12585.92,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":967.73,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":5879.03,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":157983.4,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":883.51,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":167158.51,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":61293.7,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":7463.37,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":2268.99,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":35074.66,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":7330.58,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":17322.85,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":4222,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":42011.27,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":38012.02,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":408371.51,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":54001.93,\"2H-Azepin-2-one_hexah
ydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":3965723.04,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":33053.3,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":5080.39,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":2220.91,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":30651.66,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":112254.72},\"files\":{\"raw_data_file\":\"probe_063.D\"}}\n{\"sample_ID\":\"b2_pd_46\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_064\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-10T09:03:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":130.01,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":0,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":25259.89,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":2314.42,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":6661.63,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":6838.8,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":0,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":3639.47,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":1874.55,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":1254.35,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":5369.11,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":3961.16,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":1250.64,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_m
in\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":27584.73,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":1960.77,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":16391.19,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":1415.67,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":43468.81,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":31513.84,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":524747.51,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":69286.14,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":4858017.85,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":36131.7,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":4131.15,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":5370.74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":38245.49,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":74632.92},\"files\":{\"raw_data_file\":\"probe_064.D\"}}\n{\"sample_ID\":\"b2_pd_47\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_065\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-10T10:26:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":313.01,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":0,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":10676.99,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":1334.36,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":1069.44,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":4902.89,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":1024.49,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":2621.85,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":843.26,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":597.64,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":4308.05,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":6549.97,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":864.7,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":18256.37,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":2560.89,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":11333.86,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":1525.16,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":26561.7,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":16147.28,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":509842.57,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":48016.91,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60
-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":7814183,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":34980.69,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":3687.67,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":2556.36,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":31155.78,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":50865.67},\"files\":{\"raw_data_file\":\"probe_065.D\"}}\n{\"sample_ID\":\"b2_pd_48\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_066\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-10T11:50:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":110.97,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":0,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":13807.7,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":2608.53,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":3075.71,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":1505.26,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":1096.24,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":3310.66,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":758.42,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":2319.86,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":3334.1,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":3809.43,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":801.61,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1
-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":32966.32,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":2085.3,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":22500.47,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":1643.11,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":39343.63,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":22657.58,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":396037.32,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":49361.22,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":6535950.38,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":35260.1,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":3805.95,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":5388.79,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":39075.62,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":70369.2},\"files\":{\"raw_data_file\":\"probe_066.D\"}}\n{\"sample_ID\":\"b2_pd_49\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_067\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-10T13:13:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":445.67,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":1626.66,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":19353.85,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":234.74,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":30330.64,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":5693.04,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":0,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":4551.92,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":15588.57,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":13753.68,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":10145.41,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":6576.41,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":1073.89,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":30363.62,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":3945.14,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":19156.73,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":1164.84,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":34673.42,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":31984.51,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":25309.57,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":70020.17,\"2H-Azepin-2-one_hexahydro-_CAS\":\
"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":8062793.13,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":43709.54,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":5226.39,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":7951.63,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":33951.78,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":137829.22},\"files\":{\"raw_data_file\":\"probe_067.D\"}}\n{\"sample_ID\":\"b2_pd_50\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_068\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-10T14:37:00-07:00\"},\"operator\":\"Laura Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":11728.59,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":24562.86,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":256689.35,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":22650.99,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":559531.19,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":12170.74,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":0,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":7933.34,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":246399.96,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":0,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":245273.35,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":108608.49,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":12617.5,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":2949.15,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylet
hene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":45455.42,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":9241.44,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":20769.92,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":7432.45,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":45545.4,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":44957.21,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":469835.95,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":68201.7,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\",\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":5247566.54,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":48517.87,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":5649.94,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":293.74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":46948.47,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":186205.04},\"files\":{\"raw_data_file\":\"probe_068.D\"}}\n{\"sample_ID\":\"b2_pd_51\",\"sample_type\":\"live_leaf_sample\",\"run_name\":\"probe_069\",\"experiment_type\":\"VOCs\",\"level\":\"NA\",\"experiment_date-time\":{$date:\"2019-04-10T16:00:00-07:00\"},\"operator\":\"Laura 
Meredith\",\"protocol_ID\":\"protocolA\",\"Result\":{\"CIS-3-HEXENOL_CAS\":\"928-96-1\",\"CIS-3-HEXENOL_RT_min\":18.90856667,\"CIS-3-HEXENOL_match_factor\":92,\"CIS-3-HEXENOL_area\":0,\"tricyclene_CAS\":\"508-32-7\",\"tricyclene_RT_min\":22.56828333,\"tricyclene_match_factor\":95,\"tricyclene_area\":0,\"ALPHA-PINENE_CAS\":\"80-56-8\",\"ALPHA-PINENE_RT_min\":23.08402361,\"ALPHA-PINENE_match_factor\":90,\"ALPHA-PINENE_area\":10294.14,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_CAS\":\"60844-39-5\",\"TRANS-8-METHYLBICYCLO-NON-3-ENE_RT_min\":23.40683333,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_match_factor\":76,\"TRANS-8-METHYLBICYCLO-NON-3-ENE_area\":158.14,\"CAMPHENE_CAS\":\"79-92-5\",\"CAMPHENE_RT_min\":24.02622181,\"CAMPHENE_match_factor\":95,\"CAMPHENE_area\":950.26,\"2-BETA-PINENE_CAS\":\"127-91-3\",\"2-BETA-PINENE_RT_min\":25.27126703,\"2-BETA-PINENE_match_factor\":87,\"2-BETA-PINENE_area\":458.52,\"Linalool_CAS\":\"78-70-6\",\"Linalool_RT_min\":25.4613,\"Linalool_match_factor\":73,\"Linalool_area\":0,\"3-Hexen-1-ol-acetate_CAS\":\"3681-71-8\",\"3-Hexen-1-ol-acetate_RT_min\":26.0719831,\"3-Hexen-1-ol-acetate_match_factor\":83,\"3-Hexen-1-ol-acetate_area\":4013.85,\"Benzene_CAS\":\"538-93-2\",\"Benzene_RT_min\":26.55945707,\"Benzene_match_factor\":97,\"Benzene_area\":805.74,\"MT-1_RT_min\":26.86888333,\"MT-1_area\":38583.44,\"ALPHA_TERPINENE_CAS\":\"99-86-5\",\"ALPHA_TERPINENE_RT_min\":27.02828791,\"ALPHA_TERPINENE_match_factor\":96,\"ALPHA_TERPINENE_area\":4098.53,\"PARA_CYMENE_CAS\":\"99-87-6\",\"PARA_CYMENE_RT_min\":27.30304381,\"PARA_CYMENE_match_factor\":98,\"PARA_CYMENE_area\":3350.51,\"Bornylene_CAS\":\"464-17-5\",\"Bornylene_RT_min\":27.5306499,\"Bornylene_match_factor\":92,\"Bornylene_area\":4689.17,\"GAMMA_TERPINENE_CAS\":\"99-85-4\",\"GAMMA_TERPINENE_RT_min\":28.74779474,\"GAMMA_TERPINENE_match_factor\":80,\"GAMMA_TERPINENE_area\":791.66,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_CAS\":\"69737-10-6\",\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_RT_min\":31.37898151,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_match_factor\":95,\"1-Amino-1-ortho-chlorophenyl-2-2-quinoxalinylethene_area\":34260.31,\"Acetic_acid_2-ethylhexyl_ester_CAS\":\"103-09-3\",\"Acetic_acid_2-ethylhexyl_ester_RT_min\":32.08357486,\"Acetic_acid_2-ethylhexyl_ester_match_factor\":92,\"Acetic_acid_2-ethylhexyl_ester_area\":2551.89,\"N-DECANAL_CAS\":\"112-31-2\",\"N-DECANAL_RT_min\":34.48635011,\"N-DECANAL_match_factor\":95,\"N-DECANAL_area\":26249.98,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_CAS\":\"80-57-9\",\"Bicyclo311hept-3-en-2-one_466-trimethyl-_RT_min\":34.99260146,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_match_factor\":95,\"Bicyclo311hept-3-en-2-one_466-trimethyl-_area\":2250.28,\"26-Dichlorostyrene_CAS\":\"28469-92-3\",\"26-Dichlorostyrene_RT_min\":35.59816569,\"26-Dichlorostyrene_match_factor\":88,\"26-Dichlorostyrene_area\":41503.74,\"Benzothiazole_CAS\":\"95-16-9\",\"Benzothiazole_RT_min\":35.78156667,\"Benzothiazole_match_factor\":75,\"Benzothiazole_area\":37023.92,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_CAS\":\"61142-63-0\",\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_RT_min\":36.58880224,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_match_factor\":57,\"Cyclohexane_11-1-22-dimethylbutyl-13-propanediylbis-_area\":630551.39,\"Benzaldehyde_26-dichloro-_CAS\":\"83-38-5\",\"Benzaldehyde_26-dichloro-_RT_min\":36.80601002,\"Benzaldehyde_26-dichloro-_match_factor\":94,\"Benzaldehyde_26-dichloro-_area\":98069.62,\"2H-Azepin-2-one_hexahydro-_CAS\":\"105-60-2\"
,\"2H-Azepin-2-one_hexahydro-_RT_min\":37.04326289,\"2H-Azepin-2-one_hexahydro-_match_factor\":95,\"2H-Azepin-2-one_hexahydro-_area\":6178792.46,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_CAS\":\"116884-83-4\",\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_RT_min\":37.39351358,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_match_factor\":72,\"4H-3-anilino-1-benzothiopyran-4-one_1-oxide_area\":46521.49,\"beta-Cubebene_CAS\":\"13744-15-5\",\"beta-Cubebene_RT_min\":40.94533694,\"beta-Cubebene_match_factor\":60,\"beta-Cubebene_area\":5304.87,\"EE-ALPHA-FARNESENE_CAS\":\"502-61-4\",\"EE-ALPHA-FARNESENE_RT_min\":44.44866667,\"EE-ALPHA-FARNESENE_match_factor\":84,\"EE-ALPHA-FARNESENE_area\":1477.64,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_CAS\":\"56298-98-7\",\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_RT_min\":45.05855413,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_match_factor\":74,\"1H-Inden-1-one_23-dihydro-3345-tetramethyl-_area\":30579.14,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_CAS\":\"89-18-9\",\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_RT_min\":56.42417323,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_match_factor\":76,\"12-Benzenedicarboxylic_acid_butyl_8-methylnonyl_ester_area\":126545.34},\"files\":{\"raw_data_file\":\"probe_069.D\"}}\n" ], [ "fout.close()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c52e421d1f2a02fb84aea7900f1c7eb1f5f6b22b
21,553
ipynb
Jupyter Notebook
examples/02_model_collaborative_filtering/cornac_bpr_deep_dive.ipynb
ehrlinger/Recommenders
199dd80919a82d516f9a954cfc88851f80d9ba99
[ "MIT" ]
10,147
2019-05-07T07:24:36.000Z
2022-03-31T21:16:41.000Z
examples/02_model_collaborative_filtering/cornac_bpr_deep_dive.ipynb
ehrlinger/Recommenders
199dd80919a82d516f9a954cfc88851f80d9ba99
[ "MIT" ]
750
2019-05-07T07:34:33.000Z
2022-03-31T10:11:55.000Z
examples/02_model_collaborative_filtering/cornac_bpr_deep_dive.ipynb
ehrlinger/Recommenders
199dd80919a82d516f9a954cfc88851f80d9ba99
[ "MIT" ]
1,983
2019-05-07T08:56:48.000Z
2022-03-31T16:43:00.000Z
36.223529
403
0.537095
[ [ [ "<i>Copyright (c) Microsoft Corporation. All rights reserved.</i>\n\n<i>Licensed under the MIT License.</i>", "_____no_output_____" ], [ "# Bayesian Personalized Ranking (BPR)\n\nThis notebook serves as an introduction to Bayesian Personalized Ranking (BPR) model for implicit feedback. In this tutorial, we focus on learning the BPR model using matrix factorization approach, hence, the model is sometimes also named BPRMF.\n\nThe implementation of the model is from [Cornac](https://github.com/PreferredAI/cornac), which is a framework for recommender systems with a focus on models leveraging auxiliary data (e.g., item descriptive text and image, social network, etc).", "_____no_output_____" ], [ "## 0 Global Settings and Imports", "_____no_output_____" ] ], [ [ "import sys\nimport os\nimport cornac\nimport papermill as pm\nimport scrapbook as sb\nimport pandas as pd\nfrom recommenders.datasets import movielens\nfrom recommenders.datasets.python_splitters import python_random_split\nfrom recommenders.evaluation.python_evaluation import map_at_k, ndcg_at_k, precision_at_k, recall_at_k\nfrom recommenders.models.cornac.cornac_utils import predict_ranking\nfrom recommenders.utils.timer import Timer\nfrom recommenders.utils.constants import SEED\n\nprint(\"System version: {}\".format(sys.version))\nprint(\"Cornac version: {}\".format(cornac.__version__))", "System version: 3.6.8 |Anaconda, Inc.| (default, Feb 21 2019, 18:30:04) [MSC v.1916 64 bit (AMD64)]\nCornac version: 1.1.2\n" ], [ "# Select MovieLens data size: 100k, 1m, 10m, or 20m\nMOVIELENS_DATA_SIZE = '100k'\n\n# top k items to recommend\nTOP_K = 10\n\n# Model parameters\nNUM_FACTORS = 200\nNUM_EPOCHS = 100", "_____no_output_____" ] ], [ [ "## 1 BPR Algorithm\n\n### 1.1 Personalized Ranking from Implicit Feedback\n\nThe task of personalized ranking aims at providing each user a ranked list of items (recommendations). This is very common in scenarios where recommender systems are based on implicit user behavior (e.g. purchases, clicks). The available observations are only positive feedback where the non-observed ones are a mixture of real negative feedback and missing values.\n\nOne usual approach for item recommendation is directly predicting a preference score $\\hat{x}_{u,i}$ given to item $i$ by user $u$. BPR uses a different approach by using item pairs $(i, j)$ and optimizing for the correct ranking given preference of user $u$, thus, there are notions of *positive* and *negative* items. The training data $D_S : U \\times I \\times I$ is defined as:\n\n$$D_S = \\{(u, i, j) \\mid i \\in I^{+}_{u} \\wedge j \\in I \\setminus I^{+}_{u}\\}$$\n\nwhere user $u$ is assumed to prefer $i$ over $j$ (i.e. 
$i$ is a *positive item* and $j$ is a *negative item*).\n\n\n### 1.2 Objective Function\n\nFrom the Bayesian perspective, BPR maximizes the posterior probability over the model parameters $\\Theta$ by optimizing the likelihood function $p(i >_{u} j | \\Theta)$ and the prior probability $p(\\Theta)$.\n\n$$p(\\Theta \\mid >_{u}) \\propto p(i >_{u} j \\mid \\Theta) \\times p(\\Theta)$$\n\nThe joint probability of the likelihood over all users $u \\in U$ can be simplified to:\n\n$$ \\prod_{u \\in U} p(>_{u} \\mid \\Theta) = \\prod_{(u, i, j) \\in D_S} p(i >_{u} j \\mid \\Theta) $$\n\nThe individual probability that a user $u$ prefers item $i$ to item $j$ can be defined as:\n\n$$ p(i >_{u} j \\mid \\Theta) = \\sigma (\\hat{x}_{uij}(\\Theta)) $$\n\nwhere $\\sigma$ is the logistic sigmoid:\n\n$$ \\sigma(x) = \\frac{1}{1 + e^{-x}} $$\n\nThe preference scoring function $\\hat{x}_{uij}(\\Theta)$ could be an arbitrary real-valued function of the model parameter $\\Theta$. Thus, it makes BPR a general framework for modeling the relationship between triplets $(u, i, j)$ where different model classes like matrix factorization could be used for estimating $\\hat{x}_{uij}(\\Theta)$.\n\nFor the prior, one of the common pratices is to choose $p(\\Theta)$ following a normal distribution, which results in a nice form of L2 regularization in the final log-form of the objective function.\n\n$$ p(\\Theta) \\sim N(0, \\Sigma_{\\Theta}) $$\n\nTo reduce the complexity of the model, all parameters $\\Theta$ are assumed to be independent and having the same variance, which gives a simpler form of the co-variance matrix $\\Sigma_{\\Theta} = \\lambda_{\\Theta}I$. Thus, there are less number of hyperparameters to be determined.\n\nThe final objective of the maximum posterior estimator:\n\n$$ J = \\sum_{(u, i, j) \\in D_S} \\text{ln } \\sigma(\\hat{x}_{uij}) - \\lambda_{\\Theta} ||\\Theta||^2 $$\n\nwhere $\\lambda_\\Theta$ are the model specific regularization paramerters.\n\n\n### 1.3 Learning with Matrix Factorization\n\n#### Stochastic Gradient Descent\n\nAs the defined objective function is differentible, gradient descent based method for optimization is naturally adopted. The gradient of the objective $J$ with respect to the model parameters:\n\n$$\n\\begin{align}\n\\frac{\\partial J}{\\partial \\Theta} & = \\sum_{(u, i, j) \\in D_S} \\frac{\\partial}{\\partial \\Theta} \\text{ln} \\ \\sigma(\\hat{x}_{uij}) - \\lambda_{\\Theta} \\frac{\\partial}{\\partial \\Theta} ||\\Theta||^2 \\\\\n& \\propto \\sum_{(u, i, j) \\in D_S} \\frac{-e^{-\\hat{x}_{uij}}}{1 + e^{-\\hat{x}_{uij}}} \\cdot \\frac{\\partial}{\\partial \\Theta} \\hat{x}_{uij} - \\lambda_{\\Theta} \\Theta\n\\end{align}\n$$\n\nDue to slow convergence of full gradient descent, we prefer using stochastic gradient descent to optimize the BPR model. For each triplet $(u, i, j) \\in D_S$, the update rule for the parameters:\n\n$$ \\Theta \\leftarrow \\Theta + \\alpha \\Big( \\frac{e^{-\\hat{x}_{uij}}}{1 + e^{-\\hat{x}_{uij}}} \\cdot \\frac{\\partial}{\\partial \\Theta} \\hat{x}_{uij} + \\lambda_\\Theta \\Theta \\Big) $$\n\n#### Matrix Factorization for Preference Approximation\n\nAs mentioned earlier, the preference scoring function $\\hat{x}_{uij}(\\Theta)$ could be approximated by any real-valued function. 
First, the estimator $\\hat{x}_{uij}$ is decomposed into:\n\n$$ \\hat{x}_{uij} = \\hat{x}_{ui} - \\hat{x}_{uj} $$\n\nThe problem of estimating $\\hat{x}_{ui}$ is a standard collaborative filtering formulation, where matrix factorization approach has shown to be very effective. The prediction formula can written as dot product between user feature vector $w_u$ and item feature vector $h_i$:\n\n$$ \\hat{x}_{ui} = \\langle w_u , h_i \\rangle = \\sum_{f=1}^{k} w_{uf} \\cdot h_{if} $$\n\nThe derivatives of matrix factorization with respect to the model parameters are:\n\n$$\n\\frac{\\partial}{\\partial \\theta} \\hat{x}_{uij} = \n\\begin{cases}\n (h_{if} - h_{jf}) & \\text{if } \\theta = w_{uf} \\\\\n w_{uf} & \\text{if } \\theta = h_{if} \\\\\n -w_{uf} & \\text{if } \\theta = h_{jf} \\\\\n 0 & \\text{else}\n\\end{cases}\n$$\n\nIn theory, any kernel can be used to estimate $\\hat{x}_{ui}$ besides the dot product $ \\langle \\cdot , \\cdot \\rangle $. For example, k-Nearest-Neighbor (kNN) has also been shown to achieve good performance.\n\n#### Analogies to AUC optimization\n\nBy optimizing the objective function of BPR model, we effectively maximizing [AUC](https://towardsdatascience.com/understanding-auc-roc-curve-68b2303cc9c5) measurement. To keep the notebook focused, please refer to the [paper](https://arxiv.org/ftp/arxiv/papers/1205/1205.2618.pdf) for details of the analysis (Section 4.1.1).", "_____no_output_____" ], [ "## 2 Cornac implementation of BPR\n\nBPR is implemented in the [Cornac](https://cornac.readthedocs.io/en/latest/index.html) framework as part of the model collections.\n* Detailed documentations of the BPR model in Cornac can be found [here](https://cornac.readthedocs.io/en/latest/models.html#bayesian-personalized-ranking-bpr).\n* Source codes of the BPR implementation is available on the Cornac Github repository, which can be found [here](https://github.com/PreferredAI/cornac/blob/master/cornac/models/bpr/recom_bpr.pyx).\n", "_____no_output_____" ], [ "## 3 Cornac BPR movie recommender\n\n\n### 3.1 Load and split data\n\nTo evaluate the performance of item recommendation, we adopted the provided `python_random_split` tool for the consistency. Data is randomly split into training and test sets with the ratio of 75/25.\n\n\nNote that Cornac also cover different [built-in schemes](https://cornac.readthedocs.io/en/latest/eval_methods.html) for model evaluation.", "_____no_output_____" ] ], [ [ "data = movielens.load_pandas_df(\n size=MOVIELENS_DATA_SIZE,\n header=[\"userID\", \"itemID\", \"rating\"]\n)\n\ndata.head()", "100%|███████████████████████████████████████████████████████████████████████████████████████| 4.81k/4.81k [00:08<00:00, 590KB/s]\n" ], [ "train, test = python_random_split(data, 0.75)", "_____no_output_____" ] ], [ [ "### 3.2 Cornac Dataset\n\nTo work with models implemented in Cornac, we need to construct an object from [Dataset](https://cornac.readthedocs.io/en/latest/data.html#module-cornac.data.dataset) class.\n\nDataset Class in Cornac serves as the main object that the models will interact with. 
In addition to data transformations, Dataset provides a bunch of useful iterators for looping through the data, as well as supporting different negative sampling techniques.", "_____no_output_____" ] ], [ [ "train_set = cornac.data.Dataset.from_uir(train.itertuples(index=False), seed=SEED)\n\nprint('Number of users: {}'.format(train_set.num_users))\nprint('Number of items: {}'.format(train_set.num_items))", "Number of users: 943\nNumber of items: 1642\n" ] ], [ [ "### 3.3 Train the BPR model\n\nThe BPR has a few important parameters that we need to consider:\n\n- `k`: controls the dimension of the latent space (i.e. the size of the vectors $w_u$ and $h_i$ ).\n- `max_iter`: defines the number of iterations of the SGD procedure.\n- `learning_rate`: controls the step size $\\alpha$ in the gradient update rules.\n- `lambda_reg`: controls the L2-Regularization $\\lambda$ in the objective function.\n\nNote that different values of `k` and `max_iter` will affect the training time.\n\nWe will here set `k` to 200, `max_iter` to 100, `learning_rate` to 0.01, and `lambda_reg` to 0.001. To train the model, we simply need to call the `fit()` method.", "_____no_output_____" ] ], [ [ "bpr = cornac.models.BPR(\n k=NUM_FACTORS,\n max_iter=NUM_EPOCHS,\n learning_rate=0.01,\n lambda_reg=0.001,\n verbose=True,\n seed=SEED\n)", "_____no_output_____" ], [ "with Timer() as t:\n bpr.fit(train_set)\nprint(\"Took {} seconds for training.\".format(t))", "100%|██████████████████████████████████████████████████████████| 100/100 [00:07<00:00, 13.27it/s, correct=92.19%, skipped=9.38%]\n" ] ], [ [ "### 3.4 Prediction and Evaluation\n\nNow that our model is trained, we can produce the ranked lists for recommendation. Every recommender models in Cornac provide `rate()` and `rank()` methods for predicting item rated value as well as item ranked list for a given user. To make use of the current evaluation schemes, we will through `predict()` and `predict_ranking()` functions inside `cornac_utils` to produce the predictions.\n\nNote that BPR model is effectively designed for item ranking. Hence, we only measure the performance using ranking metrics.", "_____no_output_____" ] ], [ [ "with Timer() as t:\n all_predictions = predict_ranking(bpr, train, usercol='userID', itemcol='itemID', remove_seen=True)\nprint(\"Took {} seconds for prediction.\".format(t))", "Took 1.7393803596496582 seconds for prediction.\n" ], [ "all_predictions.head()", "_____no_output_____" ], [ "k = 10\neval_map = map_at_k(test, all_predictions, col_prediction='prediction', k=k)\neval_ndcg = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=k)\neval_precision = precision_at_k(test, all_predictions, col_prediction='prediction', k=k)\neval_recall = recall_at_k(test, all_predictions, col_prediction='prediction', k=k)\n\nprint(\"MAP:\\t%f\" % eval_map,\n \"NDCG:\\t%f\" % eval_ndcg,\n \"Precision@K:\\t%f\" % eval_precision,\n \"Recall@K:\\t%f\" % eval_recall, sep='\\n')", "MAP:\t0.109077\nNDCG:\t0.403395\nPrecision@K:\t0.354989\nRecall@K:\t0.180183\n" ], [ "# Record results with papermill for tests\nsb.glue(\"map\", eval_map)\nsb.glue(\"ndcg\", eval_ndcg)\nsb.glue(\"precision\", eval_precision)\nsb.glue(\"recall\", eval_recall)", "_____no_output_____" ] ], [ [ "## References\n\n1. Rendle, S., Freudenthaler, C., Gantner, Z., & Schmidt-Thieme, L. (2009, June). BPR: Bayesian personalized ranking from implicit feedback. https://arxiv.org/ftp/arxiv/papers/1205/1205.2618.pdf\n2. Pan, R., Zhou, Y., Cao, B., Liu, N. N., Lukose, R., Scholz, M., & Yang, Q. 
(2008, December). One-class collaborative filtering. https://cseweb.ucsd.edu/classes/fa17/cse291-b/reading/04781145.pdf\n3. **Cornac** - A Comparative Framework for Multimodal Recommender Systems. https://cornac.preferred.ai/", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
c52ea398844b53b1ff1496075be2753ee35a4154
5,498
ipynb
Jupyter Notebook
2020/Day 13.ipynb
AwesomeGitHubRepos/adventofcode
84ba7963a5d7905973f14bb1c2e3a59165f8b398
[ "MIT" ]
96
2018-04-21T07:53:34.000Z
2022-03-15T11:00:02.000Z
2020/Day 13.ipynb
AwesomeGitHubRepos/adventofcode
84ba7963a5d7905973f14bb1c2e3a59165f8b398
[ "MIT" ]
17
2019-02-07T05:14:47.000Z
2021-12-27T12:11:04.000Z
2020/Day 13.ipynb
AwesomeGitHubRepos/adventofcode
84ba7963a5d7905973f14bb1c2e3a59165f8b398
[ "MIT" ]
14
2019-02-05T06:34:15.000Z
2022-01-24T17:35:00.000Z
32.341176
262
0.556384
[ [ [ "# Day 13 - Prime number factors\n\n* https://adventofcode.com/2020/day/13\n\nFor part 1, we need to find the next multiple of a bus ID that's equal to or greater than our earliest departure time. The bus IDs, which determine their frequency, are all prime numbers, of course.\n\nWe can calculate the next bus departure $t$ for a given ID $b$ on or after earliest departure time $T$ as $t = b * \\lceil T / b \\rceil$ ($b$ multiplied by the ceiling of the division of $T$ by $b$).\n\n", "_____no_output_____" ] ], [ [ "import math\n\ndef parse_bus_ids(line: str) -> list[int]:\n return [int(b) for b in line.split(\",\") if b[0] != \"x\"]\n\ndef parse_input(lines: str) -> [int, list[int]]:\n return int(lines[0]), parse_bus_ids(lines[1])\n\ndef earliest_departure(earliest: int, bus_ids: list[int]) -> tuple[int, int]:\n t, bid = min((bid * math.ceil(earliest / bid), bid) for bid in bus_ids)\n return t - earliest, bid\n\ntest_earliest, test_bus_ids = parse_input([\"939\", \"7,13,x,x,59,x,31,19\"])\nassert earliest_departure(test_earliest, test_bus_ids) == (5, 59)", "_____no_output_____" ], [ "import aocd\ndata = aocd.get_data(day=13, year=2020).splitlines()\nearliest, bus_ids = parse_input(data)", "_____no_output_____" ], [ "wait_time, bus_id = earliest_departure(earliest, bus_ids)\nprint(\"Part 1:\", wait_time * bus_id)", "Part 1: 3035\n" ] ], [ [ "## Part 2: Chinese remainder theorem.\n\nFor part 2, we need to use the [Chinese remainder theorem](https://en.wikipedia.org/wiki/Chinese_remainder_theorem); this theorem was first introduced by the Chinese mathematician Sun-tzu (quote from the Wikipedia article):\n\n> There are certain things whose number is unknown. If we count them by threes, we have two left over; by fives, we have three left over; and by sevens, two are left over. 
How many things are there?\n\nWe need to find a number that, if counted in prime-number steps, has an offset left over, where the offset is the prime number minus the index in the bus ids list, modulo the bus id (the matching time stamp lies X minutes *before* the next bus departs).\n\nI only remembered the theorem because it was also applicable to [Advent of Code 2017, day 13](../2017/Day%2013.ipynb) (although I didn't know it at the time).\n\nI adapted the [Rosetta Code Python implementation](https://rosettacode.org/wiki/Chinese_remainder_theorem#Python) for this:", "_____no_output_____" ] ], [ [ "from functools import reduce\nfrom operator import mul\nfrom typing import Optional\n\ndef solve_chinese_remainder(bus_times: list[Optional[int]]) -> int:\n    product = reduce(mul, (bid for bid in filter(None, bus_times)))\n    summed = sum(\n        ((bid - i) % bid) * mul_inv((factor := product // bid), bid) * factor\n        for i, bid in enumerate(bus_times)\n        if bid is not None\n    )\n    return summed % product\n\ndef mul_inv(a: int, b: int) -> int:\n    if b == 1: return 1\n    b0, x0, x1 = b, 0, 1\n    while a > 1:\n        q = a // b\n        a, b = b, a % b\n        x0, x1 = x1 - q * x0, x0\n    if x1 < 0:\n        x1 += b0\n    return x1\n\ndef parse_bus_times(line: str) -> list[Optional[int]]:\n    return [None if bus_id == \"x\" else int(bus_id) for bus_id in line.split(\",\")]\n\ntests = {\n    \"7,13,x,x,59,x,31,19\": 1068781,\n    \"17,x,13,19\": 3417,\n    \"67,7,59,61\": 754018,\n    \"67,x,7,59,61\": 779210,\n    \"67,7,x,59,61\": 1261476,\n    \"1789,37,47,1889\": 1202161486,\n}\nfor times, expected in tests.items():\n    assert solve_chinese_remainder(parse_bus_times(times)) == expected", "_____no_output_____" ], [ "print(\"Part 2:\", solve_chinese_remainder(parse_bus_times(data[1])))", "Part 2: 725169163285238\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
c52ea59a536f61e8cc3dfeb8d847f9dc7254cffc
10,351
ipynb
Jupyter Notebook
install/tensorflow-install-mac-metal-jul-2021.ipynb
techthiyanes/t81_558_deep_learning
df4f13d866630a860a3b6d096b13b5b57f4f5847
[ "Apache-2.0" ]
4,721
2016-08-02T02:04:07.000Z
2022-03-31T23:18:54.000Z
install/tensorflow-install-mac-metal-jul-2021.ipynb
gcr1218/t81_558_deep_learning
4255e5bbf626b5f8a5940aa910a44caf33d750a5
[ "Apache-2.0" ]
133
2016-11-25T12:50:10.000Z
2022-03-31T20:24:10.000Z
install/tensorflow-install-mac-metal-jul-2021.ipynb
gcr1218/t81_558_deep_learning
4255e5bbf626b5f8a5940aa910a44caf33d750a5
[ "Apache-2.0" ]
2,627
2016-09-05T23:41:32.000Z
2022-03-31T16:12:27.000Z
41.075397
893
0.632403
[ [ [ "<a href=\"https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/tensorflow-install-mac-metal-jul-2021.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# T81-558: Applications of Deep Neural Networks\n**Manual Python Setup**\n* Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)\n* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).", "_____no_output_____" ], [ "# Software Installation (Mac on Apple Metal M1)\nThis class is technically oriented. A successful student needs to be able to compile and execute Python code that makes use of TensorFlow for deep learning. There are two options for you to accomplish this:\n\n* Install Python, TensorFlow, and some IDE (Jupyter, TensorFlow, and others)\n* Use Google CoLab in the cloud\n\n## Installing Python and TensorFlow\n\nIs your Mac Intel or Apple Metal (ARM)? The newer Mac ARM M1-based machines have considerably better deep learning support than their older Intel-based counterparts. Mac has not supported NVIDIA GPUs since 2016; however, the new M1 chips offer similar capabilities that will allow you to run most of the code in this course. You can run any code not supported by the Apple M1 chip through Google CoLab, a free GPU-based Python environment. \n\nIf you are running an older Intel Mac, there still are some options. Refer to my [Intel Mac installation guide](tensorflow-install-mac-jan-2021.ipynb). \n\nWith the introduction of the M1 chip, Apple introduced a system on a chip. The new Mac M1 contains CPU, GPU, and deep learning hardware support, all on a single chip. The Mac M1 can run software created for the older Intel Mac's using an emulation layer called [Rosetta](https://en.wikipedia.org/wiki/Rosetta_(software)). To leverage the new M1 chip from Python, you must use a special Python distribution called [Miniforge](https://github.com/conda-forge/miniforge). Miniforge replaces other Python distributions that you might have installed, such as Anaconda or Miniconda. Apple instructions suggest that you remove Anaconda or Miniconda before installing Miniforge. Because the Mac M1 is a very different architecture than Intel, the Miniforge distribution will maximize your performance. Be aware that once you install Miniforge, it will become your default Python interpreter.\n\n## Install Miniforge\n\nThere are a variety of methods for installing Miniforge. If you have trouble following my instructions, you may refer to this [installation process](https://developer.apple.com/metal/tensorflow-plugin/), upon which I base these instructions.\n\nI prefer to use [Homebrew](https://brew.sh/) to install Miniforge. Homebrew is a package manager for the Mac, similar to **yum** or **apt-get** for Linux. To install Homebrew, follow this [link](https://brew.sh/) and copy/paste the installation command into a Mac terminal window.\n\nOnce you have installed Homebrew, I suggest closing the terminal window and opening a new one to complete the installation. \n\nNext, you should install the xcode-select command-line utilities. 
Use the following command to install:\n\n```\nxcode-select --install\n```\n\nIf the above command gives an error, you should install Xcode from the App Store.\n\nYou will now use Homebrew to install Miniforge with the following command:\n\n```\nbrew install miniforge\n```\n\nYou should note which directory Homebrew installs Miniforge into; you will need this path later if you have to verify that the correct Python is being used.\n\n## Initiate Miniforge\n\nRun the following command to initiate your conda base environment:\n\n```\nconda init\n```\n\nThis will set the Python `PATH` to the Miniforge base in your profile (`~/.bash_profile` if bash or `~/.zshenv` if zsh) and create the base virtual environment.\n\n## Make Sure You Have the Correct Python (when things go wrong)\n\nSometimes previous versions of Python might have been installed, and when you attempt to run the install script below you will receive an error:\n\n```\nCollecting package metadata (repodata.json): done\nSolving environment: failed\n\nResolvePackageNotFound: \n  - tensorflow-deps\n```\n\nTo verify that you have the correct Python version registered, close and reopen your terminal window. Issue the following command:\n\n```\nwhich python\n```\n\nThis command should respond with something similar to:\n\n```\n/opt/homebrew/Caskroom/miniforge/base/bin/python\n```\n\nThe key things to look for in the above response are \"homebrew\" and \"miniforge\". If you see \"anaconda\" or \"miniconda\" your path is pointing to the wrong Python. You will need to modify your \".zshrc\", making sure that the three Python paths match the path that \"brew\" installed Miniforge into earlier. Most likely your \"miniforge\" is installed in one of these locations:\n\n* /usr/local/Caskroom/miniforge/base\n* /opt/homebrew/Caskroom/miniforge/base\n\nMore info [here](https://github.com/conda-forge/miniforge/issues/127). \n\n\n## Install Jupyter and Create Environment\n\nNext, let's install Jupyter, which is the editor you will use in this course.\n\n```\nconda install -y jupyter\n```\n\nWe will actually launch Jupyter later.\n\nFirst, we deactivate the base environment.\n\n```\nconda deactivate\n```\n\nNext, we will install the Mac M1 [tensorflow-apple-metal.yml](https://raw.githubusercontent.com/jeffheaton/t81_558_deep_learning/master/tensorflow-apple-metal.yml) file that I provide. Run the following command from the same directory that contains **tensorflow-apple-metal.yml**.\n\n```\nconda env create -f tensorflow-apple-metal.yml -n tensorflow\n```\n\n## Issues Creating Environment (when things go wrong)\n\nDue to some [recent changes](https://github.com/grpc/grpc/issues/25082) in one of the TensorFlow dependencies, you may get the following error when installing the YML file. \n\n```\nCollecting grpcio\n  Using cached grpcio-1.34.0.tar.gz (21.0 MB)\n    ERROR: Command errored out with exit status 1:\n```\n\nIf you encounter this error, remove your environment, define two environment variables, and try again:\n\n```\nconda env remove --name tensorflow\nexport GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=1\nexport GRPC_PYTHON_BUILD_SYSTEM_ZLIB=1\nconda env create -f tensorflow-apple-metal.yml -n tensorflow\n```\n\n\n## Activating New Environment\n\nTo enter this environment, you must use the following command: \n\n```\nconda activate tensorflow\n```\n\nFor now, let's add Jupyter support to your new environment.\n\n```\nconda install nb_conda\n```\n\n## Register your Environment\n\nThe following command registers your **tensorflow** environment. 
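Registering the environment simply writes a kernel spec that Jupyter can discover, so the new environment shows up in the notebook kernel list. After you run the registration command below, `jupyter kernelspec list` (a standard Jupyter utility, mentioned here only as an optional check) should show a `tensorflow` entry:\n\n```\njupyter kernelspec list\n```\n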
Again, make sure you \"conda activate\" your new **tensorflow** environment.\n\n```\npython -m ipykernel install --user --name tensorflow --display-name \"Python 3.9 (tensorflow)\"\n```\n\n## Testing your Environment\n\nYou can now start Jupyter notebook. Use the following command.\n\n```\njupyter notebook\n```\n\nYou can now run the following code to check that you have the versions expected.", "_____no_output_____" ] ], [ [ "# What version of Python do you have?\nimport sys\n\nimport tensorflow.keras\nimport pandas as pd\nimport sklearn as sk\nimport tensorflow as tf\n\nprint(f\"Tensor Flow Version: {tf.__version__}\")\nprint(f\"Keras Version: {tensorflow.keras.__version__}\")\nprint()\nprint(f\"Python {sys.version}\")\nprint(f\"Pandas {pd.__version__}\")\nprint(f\"Scikit-Learn {sk.__version__}\")\ngpu = len(tf.config.list_physical_devices('GPU'))>0\nprint(\"GPU is\", \"available\" if gpu else \"NOT AVAILABLE\")", "Init Plugin\nInit Graph Optimizer\nInit Kernel\nTensor Flow Version: 2.5.0\nKeras Version: 2.5.0\n\nPython 3.9.7 | packaged by conda-forge | (default, Sep 29 2021, 19:24:02) \n[Clang 11.1.0 ]\nPandas 1.3.4\nScikit-Learn 1.0.1\nGPU is available\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ] ]
c52eacdc153db3f74c7bc3f02a34d8fd5efae466
3,306
ipynb
Jupyter Notebook
News Analytics.ipynb
mobu/PyNews
8efc1fa5825e22bbbce7cb90e158440d80fbb100
[ "MIT" ]
null
null
null
News Analytics.ipynb
mobu/PyNews
8efc1fa5825e22bbbce7cb90e158440d80fbb100
[ "MIT" ]
null
null
null
News Analytics.ipynb
mobu/PyNews
8efc1fa5825e22bbbce7cb90e158440d80fbb100
[ "MIT" ]
null
null
null
27.322314
146
0.538717
[ [ [ "import json\nimport numpy as np\nimport pandas as pd\nimport nltk\nfrom nltk import FreqDist\nfrom nltk.corpus import stopwords\nimport html\nfrom textblob import TextBlob\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.cluster import KMeans", "_____no_output_____" ], [ "source = 'cnn'\ndate = '20180425'\nfilename = 'gun control/CNN/{}Summary.csv'.format(date)\n\ndf = pd.read_json(('gun control/CNN/gun-{}{}.json').format(source,date),encoding='utf-8',orient='columns')\npolarity = []\nsubjectivity = []\ntop_word = []\nwordlist = np.array([])\nratio = np.array([])\nword_count = np.array([])\nsen_polarity = []\nsentence_array = []\n\nstop_words = set(stopwords.words('english'))\n\nfor k in df['Content'].iteritems():\n analysis = TextBlob(str(k))\n polarity.append(analysis.sentiment.polarity)\n subjectivity.append(analysis.sentiment.subjectivity)\n word_count = np.append(word_count,len(analysis.words))\n \n words = analysis.words\n words = [word.lower() for word in words]\n words = [word for word in words if len(word) > 1]\n words = [word for word in words if not word.isnumeric()]\n words = [word for word in words if not word in stop_words]\n words = [word for word in words if word.isalpha()]\n \n fdist = nltk.FreqDist(words)\n \n for word, frequency in fdist.most_common(1):\n wordlist = np.append(wordlist,frequency)\n top_word.append(word)\n \ndata = pd.DataFrame({'Polarity': polarity,'Subjectivity': subjectivity,'TopWord':top_word,'TopWordFreq':wordlist,'TotalWord':word_count})\n\nprint('{} Summary \\n'.format(source))\ndata.sort_index(axis=1)\nprint(data)", "cnn Summary \n\n Polarity Subjectivity TopWord TopWordFreq TotalWord\n0 -0.055556 0.544444 biden 4.0 140.0\n1 -0.004959 0.335093 hart 35.0 2387.0\n" ], [ "data.to_csv(filename)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
c52eae1632d4343f4506555223c69be2bff2bfa6
31,726
ipynb
Jupyter Notebook
resources/Vis1Exercises.ipynb
isabella232/strata-sv-2015-tutorial
7bcd8973bc7825e5e9c4edadbad037945e601f6f
[ "BSD-3-Clause" ]
27
2015-02-12T15:55:56.000Z
2019-12-29T17:04:38.000Z
resources/Vis1Exercises.ipynb
jupyter/strata-sv-2015-tutorial
7bcd8973bc7825e5e9c4edadbad037945e601f6f
[ "BSD-3-Clause" ]
7
2015-01-28T22:18:34.000Z
2015-02-18T17:51:37.000Z
resources/Vis1Exercises.ipynb
isabella232/strata-sv-2015-tutorial
7bcd8973bc7825e5e9c4edadbad037945e601f6f
[ "BSD-3-Clause" ]
43
2015-01-28T22:16:04.000Z
2021-03-03T07:39:08.000Z
242.183206
20,257
0.910547
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
c52ec31633fbb9a267ba3fc4e12417ca79292122
5,418
ipynb
Jupyter Notebook
docs/tutorials/matching/faq.ipynb
SHAAAAN/graspy
b95099585bea7a32de2edaa5f5e0cba66471edd4
[ "Apache-2.0" ]
null
null
null
docs/tutorials/matching/faq.ipynb
SHAAAAN/graspy
b95099585bea7a32de2edaa5f5e0cba66471edd4
[ "Apache-2.0" ]
null
null
null
docs/tutorials/matching/faq.ipynb
SHAAAAN/graspy
b95099585bea7a32de2edaa5f5e0cba66471edd4
[ "Apache-2.0" ]
1
2020-03-31T22:02:24.000Z
2020-03-31T22:02:24.000Z
32.638554
418
0.602252
[ [ [ "# Introduction to Graph Matching", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ] ], [ [ "The graph matching problem (GMP), is meant to find an allignment of nodes between two graphs that minimizes the number of edge disagreements between those two graphs. Therefore, the GMP can be formally written as an optimization problem: \n\n\\begin{equation}\n\\begin{aligned}\n\\min & {\\;-trace(APB^T P^T)}\\\\\n\\text{s.t. } & {\\;P \\: \\epsilon \\: \\mathcal{P}} \\\\\n\\end{aligned}\n\\end{equation}\n\nWhere $\\mathcal{P}$ is the set of possible permutation matrices.\n\nThe Quadratic Assignment problem is a combinatorial opimization problem, modeling following the real-life problem: \n\n\"Consider the problem of allocating a set of facilities to a set of locations, with the\ncost being a function of the distance and flow between the facilities, plus costs associated\nwith a facility being placed at a certain location. The objective is to assign each facility\nto a location such that the total cost is minimized.\" [1]\n\nWhen written as an optimization problem, the QAP is represented as:\n\n\\begin{equation}\n\\begin{aligned}\n\\min & {\\; trace(APB^T P^T)}\\\\\n\\text{s.t. } & {\\;P \\: \\epsilon \\: \\mathcal{P}} \\\\\n\\end{aligned}\n\\end{equation}\n\nSince the GMP objective function is the negation of the QAP objective function, any algorithm that solves one can solve the other. \n\n\nThis class is an implementation of the Fast Approximate Quadratic Assignment Problem (FAQ), an algorithm designed to efficiently and accurately solve the QAP, as well as GMP. \n\n[1] Optimierung, Diskrete & Er, Rainer & Ela, A & Burkard, Rainer & Dragoti-Cela, Eranda & Pardalos, Panos & Pitsoulis, Leonidas. (1998). The Quadratic Assignment Problem. Handbook of Combinatorial Optimization. 10.1007/978-1-4613-0303-9_27. ", "_____no_output_____" ] ], [ [ "from graspy.match import GraphMatch as GMP\nfrom graspy.simulations import er_np", "_____no_output_____" ] ], [ [ "For the sake of tutorial, we will use FAQ to solve the GMP for two graphs where we know a solution exists. \nBelow, we sample a binary graph (undirected and no self-loops) $G_1 \\sim ER_{NP}(50, 0.3)$.\nThen, we randomly shuffle the nodes of $G_1$ to initiate $G_2$.\nThe number of edge disagreements as a result of the node shuffle is printed below.", "_____no_output_____" ] ], [ [ "n = 50\np = 0.3\n\nnp.random.seed(1)\nG1 = er_np(n=n, p=p)\nnode_shuffle_input = np.random.permutation(n)\nG2 = G1[np.ix_(node_shuffle_input, node_shuffle_input)]\nprint(\"Number of edge disagreements: \", sum(sum(abs(G1-G2))))", "_____no_output_____" ] ], [ [ "## Visualize the graphs using heat mapping", "_____no_output_____" ] ], [ [ "from graspy.plot import heatmap\nheatmap(G1, cbar=False, title = 'G1 [ER-NP(50, 0.3) Simulation]')\nheatmap(G2, cbar=False, title = 'G2 [G1 Randomly Shuffled]')", "_____no_output_____" ] ], [ [ "Below, we create a model to solve GMP. The model is then fitted for the two graphs $G_1$ and $G_2$. One of the option for the algorithm is the starting position of $P$. In this case, the class default of barycenter intialization is used, or the flat doubly stochastic matrix. The number of edge disagreements is printed below. 
With zero edge disagreements, we see that FAQ is successful in unshuffling the graph.", "_____no_output_____" ] ], [ [ "gmp = GMP()\ngmp = gmp.fit(G1,G2)\nG2 = G2[np.ix_(gmp.perm_inds_, gmp.perm_inds_)]\nprint(\"Number of edge disagreements: \", sum(sum(abs(G1-G2))))", "_____no_output_____" ], [ "heatmap(G1, cbar=False, title = 'G1[ER-NP(50, 0.3) Simulation]')\nheatmap(G2, cbar=False, title = 'G2[ER-NP(50, 0.3) Randomly Shuffled] unshuffled')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
c52ed179f259077c072e1a7ff11317167fab3994
73,018
ipynb
Jupyter Notebook
NLP/NLP.ipynb
yyjjjj/relay_02
e113aba44d55c8a12a5bff26d4020dddcfa83720
[ "MIT" ]
1
2020-07-27T02:27:38.000Z
2020-07-27T02:27:38.000Z
NLP/NLP.ipynb
yyjjjj/relay_02
e113aba44d55c8a12a5bff26d4020dddcfa83720
[ "MIT" ]
5
2020-08-07T10:37:21.000Z
2020-08-28T13:45:24.000Z
NLP/NLP.ipynb
yyjjjj/relay_02
e113aba44d55c8a12a5bff26d4020dddcfa83720
[ "MIT" ]
18
2020-07-27T02:26:23.000Z
2020-11-22T14:00:41.000Z
140.419231
57,356
0.869087
[ [ [ "cat ratings_train.txt | head -n 10", "id\tdocument\tlabel\r\n9976970\t아 더빙.. 진짜 짜증나네요 목소리\t0\r\n3819312\t흠...포스터보고 초딩영화줄....오버연기조차 가볍지 않구나\t1\r\n10265843\t너무재밓었다그래서보는것을추천한다\t0\r\n9045019\t교도소 이야기구먼 ..솔직히 재미는 없다..평점 조정\t0\r\n6483659\t사이몬페그의 익살스런 연기가 돋보였던 영화!스파이더맨에서 늙어보이기만 했던 커스틴 던스트가 너무나도 이뻐보였다\t1\r\n5403919\t막 걸음마 뗀 3세부터 초등학교 1학년생인 8살용영화.ㅋㅋㅋ...별반개도 아까움.\t0\r\n7797314\t원작의 긴장감을 제대로 살려내지못했다.\t0\r\n9443947\t별 반개도 아깝다 욕나온다 이응경 길용우 연기생활이몇년인지..정말 발로해도 그것보단 낫겟다 납치.감금만반복반복..이드라마는 가족도없다 연기못하는사람만모엿네\t0\r\n7156791\t액션이 없는데도 재미 있는 몇안되는 영화\t1\r\ncat: stdout: Broken pipe\r\n" ], [ "def read_data(filename):\n with open(filename, 'r') as f:\n data = [line.split('\\t') for line in f.read().splitlines()]\n # txt 파일의 헤더(id document label)는 제외하기\n data = data[1:]\n return data\n\ntrain_data = read_data('ratings_train.txt')\ntest_data = read_data('ratings_test.txt')", "_____no_output_____" ], [ "print(len(train_data))\nprint(train_data[0])\nprint(len(test_data))\nprint(len(test_data[0]))", "150000\n['9976970', '아 더빙.. 진짜 짜증나네요 목소리', '0']\n50000\n3\n" ], [ "from konlpy.tag import Okt\n\nokt = Okt()\nprint(okt.pos(u'이 밤 그날의 반딧불을 당신의 창 가까이 보낼게요'))", "[('이', 'Noun'), ('밤', 'Noun'), ('그날', 'Noun'), ('의', 'Josa'), ('반딧불', 'Noun'), ('을', 'Josa'), ('당신', 'Noun'), ('의', 'Josa'), ('창', 'Noun'), ('가까이', 'Noun'), ('보낼게요', 'Verb')]\n" ], [ "import json\nimport os\nfrom pprint import pprint\n\ndef tokenize(doc):\n # norm은 정규화, stem은 근어로 표시하기를 나타냄\n return ['/'.join(t) for t in okt.pos(doc, norm=True, stem=True)]\n\nif os.path.isfile('train_docs.json'):\n with open('train_docs.json') as f:\n train_docs = json.load(f)\n with open('test_docs.json') as f:\n test_docs = json.load(f)\nelse:\n train_docs = [(tokenize(row[1]), row[2]) for row in train_data]\n test_docs = [(tokenize(row[1]), row[2]) for row in test_data]\n # JSON 파일로 저장\n with open('train_docs.json', 'w', encoding=\"utf-8\") as make_file:\n json.dump(train_docs, make_file, ensure_ascii=False, indent=\"\\t\")\n with open('test_docs.json', 'w', encoding=\"utf-8\") as make_file:\n json.dump(test_docs, make_file, ensure_ascii=False, indent=\"\\t\")\n\n# 예쁘게(?) 
출력하기 위해서 pprint 라이브러리 사용\npprint(train_docs[0])", "[['아/Exclamation',\n '더빙/Noun',\n '../Punctuation',\n '진짜/Noun',\n '짜증나다/Adjective',\n '목소리/Noun'],\n '0']\n" ], [ "tokens = [t for d in train_docs for t in d[0]]\nprint(len(tokens))", "2159921\n" ], [ "import nltk\ntext = nltk.Text(tokens, name='NMSC')\n\n# 전체 토큰의 개수\nprint(len(text.tokens))\n\n# 중복을 제외한 토큰의 개수\nprint(len(set(text.tokens))) \n\n# 출현 빈도가 높은 상위 토큰 10개\npprint(text.vocab().most_common(10))", "2159921\n49895\n[('./Punctuation', 67778),\n ('영화/Noun', 50818),\n ('하다/Verb', 41209),\n ('이/Josa', 38540),\n ('보다/Verb', 38538),\n ('의/Josa', 30188),\n ('../Punctuation', 29055),\n ('가/Josa', 26627),\n ('에/Josa', 26468),\n ('을/Josa', 23118)]\n" ], [ "import matplotlib.pyplot as plt\nfrom matplotlib import font_manager, rc\n%matplotlib inline\n\nfont_fname = '/Library/Fonts/NanumGothic.ttf'\nfont_name = font_manager.FontProperties(fname=font_fname).get_name()\nrc('font', family=font_name)\n\nplt.figure(figsize=(20,10))\ntext.plot(50)", "_____no_output_____" ], [ "selected_words = [f[0] for f in text.vocab().most_common(100)]\n\ndef term_frequency(doc):\n return [doc.count(word) for word in selected_words]\n\ntrain_x = [term_frequency(d) for d, _ in train_docs]\ntest_x = [term_frequency(d) for d, _ in test_docs]\ntrain_y = [c for _, c in train_docs]\ntest_y = [c for _, c in test_docs]", "_____no_output_____" ], [ "import tensorflow as tf\n\ncheckpoint_path = \"training_1/cp.ckpt\"\ncheckpoint_dir = os.path.dirname(checkpoint_path)\n\ncp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,\n save_weights_only=True,\n verbose=1)", "_____no_output_____" ], [ "import numpy as np\n\nx_train = np.asarray(train_x).astype('float32')\nx_test = np.asarray(test_x).astype('float32')\n\ny_train = np.asarray(train_y).astype('float32')\ny_test = np.asarray(test_y).astype('float32')", "_____no_output_____" ], [ "from tensorflow.keras import models\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras import losses\nfrom tensorflow.keras import metrics\n\nmodel = models.Sequential()\nmodel.add(layers.Dense(64, activation='relu', input_shape=(100,)))\nmodel.add(layers.Dense(64, activation='relu'))\nmodel.add(layers.Dense(1, activation='sigmoid'))\n\nmodel.compile(optimizer=optimizers.RMSprop(lr=0.001),\n loss=losses.binary_crossentropy,\n metrics=[metrics.binary_accuracy])\n\nmodel.fit(x_train, y_train, epochs=10, batch_size=512, callbacks=[cp_callback])\nresults = model.evaluate(x_test, y_test)", "Epoch 1/10\n293/293 [==============================] - ETA: 0s - loss: 0.5623 - binary_accuracy: 0.6932\nEpoch 00001: saving model to training_1/cp.ckpt\n293/293 [==============================] - 0s 1ms/step - loss: 0.5623 - binary_accuracy: 0.6932\nEpoch 2/10\n258/293 [=========================>....] - ETA: 0s - loss: 0.5314 - binary_accuracy: 0.7114\nEpoch 00002: saving model to training_1/cp.ckpt\n293/293 [==============================] - 0s 1ms/step - loss: 0.5306 - binary_accuracy: 0.7119\nEpoch 3/10\n256/293 [=========================>....] - ETA: 0s - loss: 0.5217 - binary_accuracy: 0.7189\nEpoch 00003: saving model to training_1/cp.ckpt\n293/293 [==============================] - 0s 1ms/step - loss: 0.5220 - binary_accuracy: 0.7185\nEpoch 4/10\n256/293 [=========================>....] 
- ETA: 0s - loss: 0.5174 - binary_accuracy: 0.7222\nEpoch 00004: saving model to training_1/cp.ckpt\n293/293 [==============================] - 0s 1ms/step - loss: 0.5161 - binary_accuracy: 0.7227\nEpoch 5/10\n283/293 [===========================>..] - ETA: 0s - loss: 0.5116 - binary_accuracy: 0.7265\nEpoch 00005: saving model to training_1/cp.ckpt\n293/293 [==============================] - 0s 1ms/step - loss: 0.5115 - binary_accuracy: 0.7263\nEpoch 6/10\n263/293 [=========================>....] - ETA: 0s - loss: 0.5070 - binary_accuracy: 0.7287\nEpoch 00006: saving model to training_1/cp.ckpt\n293/293 [==============================] - 0s 1ms/step - loss: 0.5077 - binary_accuracy: 0.7286\nEpoch 7/10\n251/293 [========================>.....] - ETA: 0s - loss: 0.5051 - binary_accuracy: 0.7306\nEpoch 00007: saving model to training_1/cp.ckpt\n293/293 [==============================] - 0s 1ms/step - loss: 0.5048 - binary_accuracy: 0.7309\nEpoch 8/10\n263/293 [=========================>....] - ETA: 0s - loss: 0.5014 - binary_accuracy: 0.7341\nEpoch 00008: saving model to training_1/cp.ckpt\n293/293 [==============================] - 0s 1ms/step - loss: 0.5023 - binary_accuracy: 0.7329\nEpoch 9/10\n265/293 [==========================>...] - ETA: 0s - loss: 0.5006 - binary_accuracy: 0.7349\nEpoch 00009: saving model to training_1/cp.ckpt\n293/293 [==============================] - 0s 1ms/step - loss: 0.5002 - binary_accuracy: 0.7351\nEpoch 10/10\n268/293 [==========================>...] - ETA: 0s - loss: 0.4976 - binary_accuracy: 0.7355\nEpoch 00010: saving model to training_1/cp.ckpt\n293/293 [==============================] - 0s 1ms/step - loss: 0.4984 - binary_accuracy: 0.7350\n1563/1563 [==============================] - 1s 560us/step - loss: 0.5215 - binary_accuracy: 0.7192\n" ], [ "results", "_____no_output_____" ], [ "def predict_pos_neg(review):\n token = tokenize(review)\n tf = term_frequency(token)\n data = np.expand_dims(np.asarray(tf).astype('float32'), axis=0)\n score = float(model.predict(data))\n if(score > 0.5):\n #print(\"[{}]는 {:.2f}% 확률로 긍정 리뷰이지 않을까 추측해봅니다.^^\\n\".format(review, score * 100))\n return score\n else:\n #print(\"[{}]는 {:.2f}% 확률로 부정 리뷰이지 않을까 추측해봅니다.^^;\\n\".format(review, (1 - score) * 100))\n return -score", "_____no_output_____" ], [ "'''\npredict_pos_neg(\"올해 최고의 영화! 세 번 넘게 봐도 질리지가 않네요.\")\npredict_pos_neg(\"배경 음악이 영화의 분위기랑 너무 안 맞았습니다. 몰입에 방해가 됩니다.\")\npredict_pos_neg(\"주연 배우가 신인인데 연기를 진짜 잘 하네요. 
몰입감 ㅎㄷㄷ\")\npredict_pos_neg(\"믿고 보는 감독이지만 이번에는 아니네요\")\npredict_pos_neg(\"주연배우 때문에 봤어요\")\n'''", "_____no_output_____" ], [ "'''\npredict_pos_neg(\"혹시 UCPC 본선\")\npredict_pos_neg(\"진출한 팀 있나요\")\npredict_pos_neg(\"3분 후 본선 끝나는데\")\npredict_pos_neg(\"우리팀만 했나 해서..\")\n'''", "_____no_output_____" ], [ "def read_chat_data(filename):\n with open(filename, 'r') as f:\n data = [line.split('\\t') for line in f.read().splitlines()]\n\n return data", "_____no_output_____" ], [ "def make_score_dictionary(data):\n score_list = {}\n for chat in data:\n if(chat[0] in score_list):\n score_list[chat[0]].append(predict_pos_neg(chat[1]))\n else:\n score_list[chat[0]] = [predict_pos_neg(chat[1])]\n\n return score_list", "_____no_output_____" ], [ "def make_average_score_dictionary(score_dictionary):\n average_score_dictionary = {}\n for key in score_dictionary.keys():\n average_score_dictionary[key] = sum(score_dictionary[key])/len(score_dictionary[key])\n \n return average_score_dictionary", "_____no_output_____" ], [ "print(make_average_score_dictionary(make_score_dictionary(read_chat_data('test.txt'))))", "{'김다은': 0.16959591616283765, '령희': 0.5202129483222961, '조효은': -0.2089512050151825, '김동민': 0.5202129483222961, '예진': 0.5202129483222961}\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c52ed2cd997d8f6551f1f122b8430aef4f3d6153
367,605
ipynb
Jupyter Notebook
Vehicle_Number_Plate_Detection(Using_YOLOv3).ipynb
SwarnenduGanguli25/Vehicle_Number_Plate_Recognition
d65004755bacd22326420b5e824a112598417d8f
[ "MIT" ]
null
null
null
Vehicle_Number_Plate_Detection(Using_YOLOv3).ipynb
SwarnenduGanguli25/Vehicle_Number_Plate_Recognition
d65004755bacd22326420b5e824a112598417d8f
[ "MIT" ]
null
null
null
Vehicle_Number_Plate_Detection(Using_YOLOv3).ipynb
SwarnenduGanguli25/Vehicle_Number_Plate_Recognition
d65004755bacd22326420b5e824a112598417d8f
[ "MIT" ]
null
null
null
96.080763
1,690
0.576238
[ [ [ "# VEHICLE_NUMBER_PLATE_RECOGNITION", "_____no_output_____" ], [ "## PART-1(DETECTION)", "_____no_output_____" ], [ "#### 1. Importing required Libraries", "_____no_output_____" ] ], [ [ "! pip install pydrive \nimport os\nfrom pydrive.auth import GoogleAuth\nfrom pydrive.drive import GoogleDrive\nfrom google.colab import auth\nfrom oauth2client.client import GoogleCredentials\n# 1. Authenticate and create the PyDrive client.\nauth.authenticate_user()\ngauth = GoogleAuth()\ngauth.credentials = GoogleCredentials.get_application_default()\ndrive = GoogleDrive(gauth)\nlocal_download_path = os.path.expanduser('~/data')\ntry:\n os.makedirs(local_download_path)\nexcept: pass\nfrom google.colab import drive\ndrive.mount('/content/drive',force_remount=True)", "Requirement already satisfied: pydrive in /usr/local/lib/python3.6/dist-packages (1.3.1)\nRequirement already satisfied: oauth2client>=4.0.0 in /usr/local/lib/python3.6/dist-packages (from pydrive) (4.1.3)\nRequirement already satisfied: google-api-python-client>=1.2 in /usr/local/lib/python3.6/dist-packages (from pydrive) (1.7.12)\nRequirement already satisfied: PyYAML>=3.0 in /usr/local/lib/python3.6/dist-packages (from pydrive) (3.13)\nRequirement already satisfied: rsa>=3.1.4 in /usr/local/lib/python3.6/dist-packages (from oauth2client>=4.0.0->pydrive) (4.7)\nRequirement already satisfied: pyasn1>=0.1.7 in /usr/local/lib/python3.6/dist-packages (from oauth2client>=4.0.0->pydrive) (0.4.8)\nRequirement already satisfied: httplib2>=0.9.1 in /usr/local/lib/python3.6/dist-packages (from oauth2client>=4.0.0->pydrive) (0.17.4)\nRequirement already satisfied: pyasn1-modules>=0.0.5 in /usr/local/lib/python3.6/dist-packages (from oauth2client>=4.0.0->pydrive) (0.2.8)\nRequirement already satisfied: six>=1.6.1 in /usr/local/lib/python3.6/dist-packages (from oauth2client>=4.0.0->pydrive) (1.15.0)\nRequirement already satisfied: google-auth>=1.4.1 in /usr/local/lib/python3.6/dist-packages (from google-api-python-client>=1.2->pydrive) (1.17.2)\nRequirement already satisfied: google-auth-httplib2>=0.0.3 in /usr/local/lib/python3.6/dist-packages (from google-api-python-client>=1.2->pydrive) (0.0.4)\nRequirement already satisfied: uritemplate<4dev,>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from google-api-python-client>=1.2->pydrive) (3.0.1)\nRequirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from google-auth>=1.4.1->google-api-python-client>=1.2->pydrive) (4.2.1)\nRequirement already satisfied: setuptools>=40.3.0 in /usr/local/lib/python3.6/dist-packages (from google-auth>=1.4.1->google-api-python-client>=1.2->pydrive) (51.3.3)\nMounted at /content/drive\n" ], [ "%cd 'drive/My Drive'", "/content/drive/My Drive\n" ] ], [ [ "#### 2. 
Installing required configs for darknet to use YOLOv3", "_____no_output_____" ] ], [ [ "!apt-get update > /dev/null\n!apt-get upgrade > /dev/null\n!apt-get install build-essential > /dev/null\n!apt-get install cmake git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev > /dev/null\n!apt-get install libopencv-dev > /dev/null\n!apt-get install libavcodec-dev libavformat-dev libswscale-d > /dev/null", "E: Unable to locate package libswscale-d\n" ], [ "%cd darknet\n!sed -i 's/OPENCV=1/OPENCV=0/g' Makefile\n!sed -i 's/GPU=0/GPU=1/g' Makefile\n!sed -i 's/CUDNN=0/CUDNN=1/g' Makefile\n!sed -i 's/CUDNN_HALF=0/CUDNN_HALF=1/g' Makefile\n\n!make", "/content/drive/My Drive/darknet\nchmod +x *.sh\ng++ -std=c++11 -std=c++11 -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF -c ./src/image_opencv.cpp -o obj/image_opencv.o\ng++ -std=c++11 -std=c++11 -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF -c ./src/http_stream.cpp -o obj/http_stream.o\n\u001b[01m\u001b[K./src/http_stream.cpp:\u001b[m\u001b[K In member function ‘\u001b[01m\u001b[Kbool JSON_sender::write(const char*)\u001b[m\u001b[K’:\n\u001b[01m\u001b[K./src/http_stream.cpp:249:21:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kunused variable ‘\u001b[01m\u001b[Kn\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wunused-variable\u001b[m\u001b[K]\n int \u001b[01;35m\u001b[Kn\u001b[m\u001b[K = _write(client, outputbuf, outlen);\n \u001b[01;35m\u001b[K^\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/http_stream.cpp:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[Kvoid set_track_id(detection*, int, float, float, float, int, int, int)\u001b[m\u001b[K’:\n\u001b[01m\u001b[K./src/http_stream.cpp:863:27:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kcomparison between signed and unsigned integer expressions [\u001b[01;35m\u001b[K-Wsign-compare\u001b[m\u001b[K]\n for (int i = 0; \u001b[01;35m\u001b[Ki < v.size()\u001b[m\u001b[K; ++i) {\n \u001b[01;35m\u001b[K~~^~~~~~~~~~\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/http_stream.cpp:871:33:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kcomparison between signed and unsigned integer expressions [\u001b[01;35m\u001b[K-Wsign-compare\u001b[m\u001b[K]\n for (int old_id = 0; \u001b[01;35m\u001b[Kold_id < old_dets.size()\u001b[m\u001b[K; ++old_id) {\n \u001b[01;35m\u001b[K~~~~~~~^~~~~~~~~~~~~~~~~\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/http_stream.cpp:890:31:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kcomparison between signed and unsigned integer expressions [\u001b[01;35m\u001b[K-Wsign-compare\u001b[m\u001b[K]\n for (int index = 0; \u001b[01;35m\u001b[Kindex < new_dets_num*old_dets.size()\u001b[m\u001b[K; ++index) {\n \u001b[01;35m\u001b[K~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/http_stream.cpp:926:28:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kcomparison between signed and unsigned integer expressions [\u001b[01;35m\u001b[K-Wsign-compare\u001b[m\u001b[K]\n if (\u001b[01;35m\u001b[Kold_dets_dq.size() > deque_size\u001b[m\u001b[K) old_dets_dq.pop_front();\n \u001b[01;35m\u001b[K~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~\u001b[m\u001b[K\ngcc -Iinclude/ -I3rdparty/stb/include -DGPU 
-I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF -c ./src/gemm.c -o obj/gemm.o\n\u001b[01m\u001b[K./src/gemm.c:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[Kconvolution_2d\u001b[m\u001b[K’:\n\u001b[01m\u001b[K./src/gemm.c:2038:15:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kunused variable ‘\u001b[01m\u001b[Kout_w\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wunused-variable\u001b[m\u001b[K]\n const int \u001b[01;35m\u001b[Kout_w\u001b[m\u001b[K = (w + 2 * pad - ksize) / stride + 1; // output_width=input_width for stride=1 and pad=1\n \u001b[01;35m\u001b[K^~~~~\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/gemm.c:2037:15:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kunused variable ‘\u001b[01m\u001b[Kout_h\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wunused-variable\u001b[m\u001b[K]\n const int \u001b[01;35m\u001b[Kout_h\u001b[m\u001b[K = (h + 2 * pad - ksize) / stride + 1; // output_height=input_height for stride=1 and pad=1\n \u001b[01;35m\u001b[K^~~~~\u001b[m\u001b[K\ngcc -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF -c ./src/utils.c -o obj/utils.o\n\u001b[01m\u001b[K./src/utils.c:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[Kcustom_hash\u001b[m\u001b[K’:\n\u001b[01m\u001b[K./src/utils.c:1045:12:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Ksuggest parentheses around assignment used as truth value [\u001b[01;35m\u001b[K-Wparentheses\u001b[m\u001b[K]\n while (\u001b[01;35m\u001b[Kc\u001b[m\u001b[K = *str++)\n \u001b[01;35m\u001b[K^\u001b[m\u001b[K\ngcc -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF -c ./src/dark_cuda.c -o obj/dark_cuda.o\n\u001b[01m\u001b[K./src/dark_cuda.c:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[Kcudnn_check_error_extended\u001b[m\u001b[K’:\n\u001b[01m\u001b[K./src/dark_cuda.c:224:20:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kcomparison between ‘\u001b[01m\u001b[KcudaError_t {aka enum cudaError}\u001b[m\u001b[K’ and ‘\u001b[01m\u001b[Kenum <anonymous>\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wenum-compare\u001b[m\u001b[K]\n if (status \u001b[01;35m\u001b[K!=\u001b[m\u001b[K CUDNN_STATUS_SUCCESS)\n \u001b[01;35m\u001b[K^~\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/dark_cuda.c:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[Kpre_allocate_pinned_memory\u001b[m\u001b[K’:\n\u001b[01m\u001b[K./src/dark_cuda.c:276:40:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kformat ‘\u001b[01m\u001b[K%u\u001b[m\u001b[K’ expects argument of type ‘\u001b[01m\u001b[Kunsigned int\u001b[m\u001b[K’, but argument 2 has type ‘\u001b[01m\u001b[Klong unsigned int\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wformat=\u001b[m\u001b[K]\n printf(\"pre_allocate: size = \u001b[01;35m\u001b[K%Iu\u001b[m\u001b[K MB, num_of_blocks = %Iu, block_size = %Iu MB \\n\",\n \u001b[01;35m\u001b[K~~^\u001b[m\u001b[K\n \u001b[32m\u001b[K%Ilu\u001b[m\u001b[K\n \u001b[32m\u001b[Ksize / (1024*1024)\u001b[m\u001b[K, num_of_blocks, pinned_block_size / (1024 * 1024));\n \u001b[32m\u001b[K~~~~~~~~~~~~~~~~~~\u001b[m\u001b[K \n\u001b[01m\u001b[K./src/dark_cuda.c:276:64:\u001b[m\u001b[K 
\u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kformat ‘\u001b[01m\u001b[K%u\u001b[m\u001b[K’ expects argument of type ‘\u001b[01m\u001b[Kunsigned int\u001b[m\u001b[K’, but argument 3 has type ‘\u001b[01m\u001b[Ksize_t {aka const long unsigned int}\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wformat=\u001b[m\u001b[K]\n printf(\"pre_allocate: size = %Iu MB, num_of_blocks = \u001b[01;35m\u001b[K%Iu\u001b[m\u001b[K, block_size = %Iu MB \\n\",\n \u001b[01;35m\u001b[K~~^\u001b[m\u001b[K\n \u001b[32m\u001b[K%Ilu\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/dark_cuda.c:276:82:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kformat ‘\u001b[01m\u001b[K%u\u001b[m\u001b[K’ expects argument of type ‘\u001b[01m\u001b[Kunsigned int\u001b[m\u001b[K’, but argument 4 has type ‘\u001b[01m\u001b[Klong unsigned int\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wformat=\u001b[m\u001b[K]\n printf(\"pre_allocate: size = %Iu MB, num_of_blocks = %Iu, block_size = \u001b[01;35m\u001b[K%Iu\u001b[m\u001b[K MB \\n\",\n \u001b[01;35m\u001b[K~~^\u001b[m\u001b[K\n \u001b[32m\u001b[K%Ilu\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/dark_cuda.c:286:37:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kformat ‘\u001b[01m\u001b[K%d\u001b[m\u001b[K’ expects argument of type ‘\u001b[01m\u001b[Kint\u001b[m\u001b[K’, but argument 2 has type ‘\u001b[01m\u001b[Ksize_t {aka const long unsigned int}\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wformat=\u001b[m\u001b[K]\n printf(\" Allocated \u001b[01;35m\u001b[K%d\u001b[m\u001b[K pinned block \\n\", pinned_block_size);\n \u001b[01;35m\u001b[K~^\u001b[m\u001b[K\n \u001b[32m\u001b[K%ld\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/dark_cuda.c:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[Kcuda_make_array_pinned_preallocated\u001b[m\u001b[K’:\n\u001b[01m\u001b[K./src/dark_cuda.c:307:43:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kformat ‘\u001b[01m\u001b[K%d\u001b[m\u001b[K’ expects argument of type ‘\u001b[01m\u001b[Kint\u001b[m\u001b[K’, but argument 2 has type ‘\u001b[01m\u001b[Ksize_t {aka long unsigned int}\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wformat=\u001b[m\u001b[K]\n printf(\"\\n Pinned block_id = \u001b[01;35m\u001b[K%d\u001b[m\u001b[K, filled = %f %% \\n\", pinned_block_id, filled);\n \u001b[01;35m\u001b[K~^\u001b[m\u001b[K\n \u001b[32m\u001b[K%ld\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/dark_cuda.c:322:64:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kformat ‘\u001b[01m\u001b[K%d\u001b[m\u001b[K’ expects argument of type ‘\u001b[01m\u001b[Kint\u001b[m\u001b[K’, but argument 2 has type ‘\u001b[01m\u001b[Klong unsigned int\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wformat=\u001b[m\u001b[K]\n printf(\"Try to allocate new pinned memory, size = \u001b[01;35m\u001b[K%d\u001b[m\u001b[K MB \\n\", \u001b[32m\u001b[Ksize / (1024 * 1024)\u001b[m\u001b[K);\n \u001b[01;35m\u001b[K~^\u001b[m\u001b[K \u001b[32m\u001b[K~~~~~~~~~~~~~~~~~~~~\u001b[m\u001b[K\n \u001b[32m\u001b[K%ld\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/dark_cuda.c:328:63:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kformat ‘\u001b[01m\u001b[K%d\u001b[m\u001b[K’ expects argument of type ‘\u001b[01m\u001b[Kint\u001b[m\u001b[K’, but argument 2 has type ‘\u001b[01m\u001b[Klong unsigned int\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wformat=\u001b[m\u001b[K]\n printf(\"Try to allocate new pinned BLOCK, size = \u001b[01;35m\u001b[K%d\u001b[m\u001b[K MB \\n\", \u001b[32m\u001b[Ksize / (1024 * 1024)\u001b[m\u001b[K);\n \u001b[01;35m\u001b[K~^\u001b[m\u001b[K 
\u001b[32m\u001b[K~~~~~~~~~~~~~~~~~~~~\u001b[m\u001b[K\n \u001b[32m\u001b[K%ld\u001b[m\u001b[K\ngcc -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF -c ./src/convolutional_layer.c -o obj/convolutional_layer.o\n\u001b[01m\u001b[K./src/convolutional_layer.c:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[Kforward_convolutional_layer\u001b[m\u001b[K’:\n\u001b[01m\u001b[K./src/convolutional_layer.c:1341:32:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kunused variable ‘\u001b[01m\u001b[Kt_intput_size\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wunused-variable\u001b[m\u001b[K]\n size_t \u001b[01;35m\u001b[Kt_intput_size\u001b[m\u001b[K = binary_transpose_align_input(k, n, state.workspace, &l.t_bit_input, ldb_align, l.bit_align);\n \u001b[01;35m\u001b[K^~~~~~~~~~~~~\u001b[m\u001b[K\ngcc -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF -c ./src/list.c -o obj/list.o\ngcc -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF -c ./src/image.c -o obj/image.o\ngcc -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF -c ./src/activations.c -o obj/activations.o\n\u001b[01m\u001b[K./src/activations.c:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[Kactivate\u001b[m\u001b[K’:\n\u001b[01m\u001b[K./src/activations.c:79:5:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kenumeration value ‘\u001b[01m\u001b[KRELU6\u001b[m\u001b[K’ not handled in switch [\u001b[01;35m\u001b[K-Wswitch\u001b[m\u001b[K]\n \u001b[01;35m\u001b[Kswitch\u001b[m\u001b[K(a){\n \u001b[01;35m\u001b[K^~~~~~\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/activations.c:79:5:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kenumeration value ‘\u001b[01m\u001b[KSWISH\u001b[m\u001b[K’ not handled in switch [\u001b[01;35m\u001b[K-Wswitch\u001b[m\u001b[K]\n\u001b[01m\u001b[K./src/activations.c:79:5:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kenumeration value ‘\u001b[01m\u001b[KMISH\u001b[m\u001b[K’ not handled in switch [\u001b[01;35m\u001b[K-Wswitch\u001b[m\u001b[K]\n\u001b[01m\u001b[K./src/activations.c:79:5:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kenumeration value ‘\u001b[01m\u001b[KHARD_MISH\u001b[m\u001b[K’ not handled in switch [\u001b[01;35m\u001b[K-Wswitch\u001b[m\u001b[K]\n\u001b[01m\u001b[K./src/activations.c:79:5:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kenumeration value ‘\u001b[01m\u001b[KNORM_CHAN\u001b[m\u001b[K’ not handled in switch [\u001b[01;35m\u001b[K-Wswitch\u001b[m\u001b[K]\n\u001b[01m\u001b[K./src/activations.c:79:5:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kenumeration value ‘\u001b[01m\u001b[KNORM_CHAN_SOFTMAX\u001b[m\u001b[K’ not handled in switch [\u001b[01;35m\u001b[K-Wswitch\u001b[m\u001b[K]\n\u001b[01m\u001b[K./src/activations.c:79:5:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kenumeration value 
‘\u001b[01m\u001b[KNORM_CHAN_SOFTMAX_MAXVAL\u001b[m\u001b[K’ not handled in switch [\u001b[01;35m\u001b[K-Wswitch\u001b[m\u001b[K]\n\u001b[01m\u001b[K./src/activations.c:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[Kgradient\u001b[m\u001b[K’:\n\u001b[01m\u001b[K./src/activations.c:310:5:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kenumeration value ‘\u001b[01m\u001b[KSWISH\u001b[m\u001b[K’ not handled in switch [\u001b[01;35m\u001b[K-Wswitch\u001b[m\u001b[K]\n \u001b[01;35m\u001b[Kswitch\u001b[m\u001b[K(a){\n \u001b[01;35m\u001b[K^~~~~~\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/activations.c:310:5:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kenumeration value ‘\u001b[01m\u001b[KMISH\u001b[m\u001b[K’ not handled in switch [\u001b[01;35m\u001b[K-Wswitch\u001b[m\u001b[K]\n\u001b[01m\u001b[K./src/activations.c:310:5:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kenumeration value ‘\u001b[01m\u001b[KHARD_MISH\u001b[m\u001b[K’ not handled in switch [\u001b[01;35m\u001b[K-Wswitch\u001b[m\u001b[K]\ngcc -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF -c ./src/im2col.c -o obj/im2col.o\ngcc -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF -c ./src/col2im.c -o obj/col2im.o\ngcc -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF -c ./src/blas.c -o obj/blas.o\n\u001b[01m\u001b[K./src/blas.c:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[Kbackward_shortcut_multilayer_cpu\u001b[m\u001b[K’:\n\u001b[01m\u001b[K./src/blas.c:207:21:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kunused variable ‘\u001b[01m\u001b[Kout_index\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wunused-variable\u001b[m\u001b[K]\n int \u001b[01;35m\u001b[Kout_index\u001b[m\u001b[K = id;\n \u001b[01;35m\u001b[K^~~~~~~~~\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/blas.c:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[Kfind_sim\u001b[m\u001b[K’:\n\u001b[01m\u001b[K./src/blas.c:597:59:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kformat ‘\u001b[01m\u001b[K%d\u001b[m\u001b[K’ expects argument of type ‘\u001b[01m\u001b[Kint\u001b[m\u001b[K’, but argument 2 has type ‘\u001b[01m\u001b[Ksize_t {aka long unsigned int}\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wformat=\u001b[m\u001b[K]\n printf(\" Error: find_sim(): sim isn't found: i = \u001b[01;35m\u001b[K%d\u001b[m\u001b[K, j = %d, z = %d \\n\", i, j, z);\n \u001b[01;35m\u001b[K~^\u001b[m\u001b[K\n \u001b[32m\u001b[K%ld\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/blas.c:597:67:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kformat ‘\u001b[01m\u001b[K%d\u001b[m\u001b[K’ expects argument of type ‘\u001b[01m\u001b[Kint\u001b[m\u001b[K’, but argument 3 has type ‘\u001b[01m\u001b[Ksize_t {aka long unsigned int}\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wformat=\u001b[m\u001b[K]\n printf(\" Error: find_sim(): sim isn't found: i = %d, j = \u001b[01;35m\u001b[K%d\u001b[m\u001b[K, z = %d \\n\", i, j, z);\n \u001b[01;35m\u001b[K~^\u001b[m\u001b[K\n 
\u001b[32m\u001b[K%ld\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/blas.c:597:75:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kformat ‘\u001b[01m\u001b[K%d\u001b[m\u001b[K’ expects argument of type ‘\u001b[01m\u001b[Kint\u001b[m\u001b[K’, but argument 4 has type ‘\u001b[01m\u001b[Ksize_t {aka long unsigned int}\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wformat=\u001b[m\u001b[K]\n printf(\" Error: find_sim(): sim isn't found: i = %d, j = %d, z = \u001b[01;35m\u001b[K%d\u001b[m\u001b[K \\n\", i, j, z);\n \u001b[01;35m\u001b[K~^\u001b[m\u001b[K\n \u001b[32m\u001b[K%ld\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/blas.c:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[Kfind_P_constrastive\u001b[m\u001b[K’:\n\u001b[01m\u001b[K./src/blas.c:611:68:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kformat ‘\u001b[01m\u001b[K%d\u001b[m\u001b[K’ expects argument of type ‘\u001b[01m\u001b[Kint\u001b[m\u001b[K’, but argument 2 has type ‘\u001b[01m\u001b[Ksize_t {aka long unsigned int}\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wformat=\u001b[m\u001b[K]\n printf(\" Error: find_P_constrastive(): P isn't found: i = \u001b[01;35m\u001b[K%d\u001b[m\u001b[K, j = %d, z = %d \\n\", i, j, z);\n \u001b[01;35m\u001b[K~^\u001b[m\u001b[K\n \u001b[32m\u001b[K%ld\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/blas.c:611:76:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kformat ‘\u001b[01m\u001b[K%d\u001b[m\u001b[K’ expects argument of type ‘\u001b[01m\u001b[Kint\u001b[m\u001b[K’, but argument 3 has type ‘\u001b[01m\u001b[Ksize_t {aka long unsigned int}\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wformat=\u001b[m\u001b[K]\n printf(\" Error: find_P_constrastive(): P isn't found: i = %d, j = \u001b[01;35m\u001b[K%d\u001b[m\u001b[K, z = %d \\n\", i, j, z);\n \u001b[01;35m\u001b[K~^\u001b[m\u001b[K\n \u001b[32m\u001b[K%ld\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/blas.c:611:84:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kformat ‘\u001b[01m\u001b[K%d\u001b[m\u001b[K’ expects argument of type ‘\u001b[01m\u001b[Kint\u001b[m\u001b[K’, but argument 4 has type ‘\u001b[01m\u001b[Ksize_t {aka long unsigned int}\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wformat=\u001b[m\u001b[K]\n printf(\" Error: find_P_constrastive(): P isn't found: i = %d, j = %d, z = \u001b[01;35m\u001b[K%d\u001b[m\u001b[K \\n\", i, j, z);\n \u001b[01;35m\u001b[K~^\u001b[m\u001b[K\n \u001b[32m\u001b[K%ld\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/blas.c:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[KP_constrastive_f\u001b[m\u001b[K’:\n\u001b[01m\u001b[K./src/blas.c:651:79:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kformat ‘\u001b[01m\u001b[K%d\u001b[m\u001b[K’ expects argument of type ‘\u001b[01m\u001b[Kint\u001b[m\u001b[K’, but argument 3 has type ‘\u001b[01m\u001b[Ksize_t {aka long unsigned int}\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wformat=\u001b[m\u001b[K]\n fprintf(stderr, \" Error: in P_constrastive must be i != l, while i = \u001b[01;35m\u001b[K%d\u001b[m\u001b[K, l = %d \\n\", i, l);\n \u001b[01;35m\u001b[K~^\u001b[m\u001b[K\n \u001b[32m\u001b[K%ld\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/blas.c:651:87:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kformat ‘\u001b[01m\u001b[K%d\u001b[m\u001b[K’ expects argument of type ‘\u001b[01m\u001b[Kint\u001b[m\u001b[K’, but argument 4 has type ‘\u001b[01m\u001b[Ksize_t {aka long unsigned int}\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wformat=\u001b[m\u001b[K]\n fprintf(stderr, \" Error: in P_constrastive must be i != l, while i = %d, l = 
\u001b[01;35m\u001b[K%d\u001b[m\u001b[K \\n\", i, l);\n \u001b[01;35m\u001b[K~^\u001b[m\u001b[K\n \u001b[32m\u001b[K%ld\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/blas.c:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[KP_constrastive\u001b[m\u001b[K’:\n\u001b[01m\u001b[K./src/blas.c:785:79:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kformat ‘\u001b[01m\u001b[K%d\u001b[m\u001b[K’ expects argument of type ‘\u001b[01m\u001b[Kint\u001b[m\u001b[K’, but argument 3 has type ‘\u001b[01m\u001b[Ksize_t {aka long unsigned int}\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wformat=\u001b[m\u001b[K]\n fprintf(stderr, \" Error: in P_constrastive must be i != l, while i = \u001b[01;35m\u001b[K%d\u001b[m\u001b[K, l = %d \\n\", i, l);\n \u001b[01;35m\u001b[K~^\u001b[m\u001b[K\n \u001b[32m\u001b[K%ld\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/blas.c:785:87:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kformat ‘\u001b[01m\u001b[K%d\u001b[m\u001b[K’ expects argument of type ‘\u001b[01m\u001b[Kint\u001b[m\u001b[K’, but argument 4 has type ‘\u001b[01m\u001b[Ksize_t {aka long unsigned int}\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wformat=\u001b[m\u001b[K]\n fprintf(stderr, \" Error: in P_constrastive must be i != l, while i = %d, l = \u001b[01;35m\u001b[K%d\u001b[m\u001b[K \\n\", i, l);\n \u001b[01;35m\u001b[K~^\u001b[m\u001b[K\n \u001b[32m\u001b[K%ld\u001b[m\u001b[K\ngcc -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF -c ./src/crop_layer.c -o obj/crop_layer.o\ngcc -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF -c ./src/dropout_layer.c -o obj/dropout_layer.o\ngcc -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF -c ./src/maxpool_layer.c -o obj/maxpool_layer.o\ngcc -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF -c ./src/softmax_layer.c -o obj/softmax_layer.o\n\u001b[01m\u001b[K./src/softmax_layer.c:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[Kmake_contrastive_layer\u001b[m\u001b[K’:\n\u001b[01m\u001b[K./src/softmax_layer.c:203:101:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kformat ‘\u001b[01m\u001b[K%d\u001b[m\u001b[K’ expects argument of type ‘\u001b[01m\u001b[Kint\u001b[m\u001b[K’, but argument 9 has type ‘\u001b[01m\u001b[Ksize_t {aka const long unsigned int}\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wformat=\u001b[m\u001b[K]\n fprintf(stderr, \"contrastive %4d x%4d x%4d x emb_size %4d x batch: %4d classes = %4d, step = \u001b[01;35m\u001b[K%4d\u001b[m\u001b[K \\n\", w, h, l.n, l.embedding_size, batch, l.classes, step);\n \u001b[01;35m\u001b[K~~^\u001b[m\u001b[K\n \u001b[32m\u001b[K%4ld\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/softmax_layer.c:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[Kforward_contrastive_layer\u001b[m\u001b[K’:\n\u001b[01m\u001b[K./src/softmax_layer.c:244:27:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kvariable ‘\u001b[01m\u001b[Kmax_truth\u001b[m\u001b[K’ set but not 
used [-Wunused-but-set-variable]\n[remaining compiler output: only non-fatal gcc/nvcc warnings (unused and set-but-unused variables, %d/%u format specifiers applied to size_t arguments, float** pointers passed to cudaHostAlloc which expects void**, signed/unsigned comparisons); all remaining ./src/*.c and ./src/*.cu sources compiled into obj/ without errors]\nnvcc -gencode arch=compute_35,code=sm_35 -gencode arch=compute_50,code=[sm_50,compute_50] -gencode arch=compute_52,code=[sm_52,compute_52] -gencode arch=compute_61,code=[sm_61,compute_61] -gencode arch=compute_70,code=[sm_70,compute_70] -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF --compiler-options \"-Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU 
-DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF\" -c ./src/network_kernels.cu -o obj/network_kernels.o\n./src/network_kernels.cu(364): warning: variable \"l\" was declared but never referenced\n\n./src/network_kernels.cu(364): warning: variable \"l\" was declared but never referenced\n\n./src/network_kernels.cu(364): warning: variable \"l\" was declared but never referenced\n\n./src/network_kernels.cu(364): warning: variable \"l\" was declared but never referenced\n\n./src/network_kernels.cu(364): warning: variable \"l\" was declared but never referenced\n\n\u001b[01m\u001b[K./src/network_kernels.cu:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[Kfloat train_network_datum_gpu(network, float*, float*)\u001b[m\u001b[K’:\n\u001b[01m\u001b[K./src/network_kernels.cu:364:7:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kvariable ‘\u001b[01m\u001b[Kl\u001b[m\u001b[K’ set but not used [\u001b[01;35m\u001b[K-Wunused-but-set-variable\u001b[m\u001b[K]\n \u001b[01;35m\u001b[K \u001b[m\u001b[K layer l = net.layers[net.n - 1];\n \u001b[01;35m\u001b[K^\u001b[m\u001b[K\nnvcc -gencode arch=compute_35,code=sm_35 -gencode arch=compute_50,code=[sm_50,compute_50] -gencode arch=compute_52,code=[sm_52,compute_52] -gencode arch=compute_61,code=[sm_61,compute_61] -gencode arch=compute_70,code=[sm_70,compute_70] -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF --compiler-options \"-Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF\" -c ./src/avgpool_layer_kernels.cu -o obj/avgpool_layer_kernels.o\ng++ -std=c++11 -std=c++11 -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF obj/image_opencv.o obj/http_stream.o obj/gemm.o obj/utils.o obj/dark_cuda.o obj/convolutional_layer.o obj/list.o obj/image.o obj/activations.o obj/im2col.o obj/col2im.o obj/blas.o obj/crop_layer.o obj/dropout_layer.o obj/maxpool_layer.o obj/softmax_layer.o obj/data.o obj/matrix.o obj/network.o obj/connected_layer.o obj/cost_layer.o obj/parser.o obj/option_list.o obj/darknet.o obj/detection_layer.o obj/captcha.o obj/route_layer.o obj/writing.o obj/box.o obj/nightmare.o obj/normalization_layer.o obj/avgpool_layer.o obj/coco.o obj/dice.o obj/yolo.o obj/detector.o obj/layer.o obj/compare.o obj/classifier.o obj/local_layer.o obj/swag.o obj/shortcut_layer.o obj/activation_layer.o obj/rnn_layer.o obj/gru_layer.o obj/rnn.o obj/rnn_vid.o obj/crnn_layer.o obj/demo.o obj/tag.o obj/cifar.o obj/go.o obj/batchnorm_layer.o obj/art.o obj/region_layer.o obj/reorg_layer.o obj/reorg_old_layer.o obj/super.o obj/voxel.o obj/tree.o obj/yolo_layer.o obj/gaussian_yolo_layer.o obj/upsample_layer.o obj/lstm_layer.o obj/conv_lstm_layer.o obj/scale_channels_layer.o obj/sam_layer.o obj/convolutional_kernels.o obj/activation_kernels.o obj/im2col_kernels.o obj/col2im_kernels.o obj/blas_kernels.o obj/crop_layer_kernels.o obj/dropout_layer_kernels.o obj/maxpool_layer_kernels.o obj/network_kernels.o obj/avgpool_layer_kernels.o -o darknet -lm -pthread -L/usr/local/cuda/lib64 -lcuda -lcudart -lcublas -lcurand -L/usr/local/cudnn/lib64 -lcudnn -lstdc++\n" ], [ "!apt install g++-5\n!apt install gcc-5\n!apt update\n!apt upgrade", "Reading package lists... Done\nBuilding dependency tree \nReading state information... 
Done\ng++-5 is already the newest version (5.5.0-12ubuntu1).\nThe following packages were automatically installed and are no longer required:\n linux-headers-4.15.0-134 linux-headers-4.15.0-134-generic\nUse 'apt autoremove' to remove them.\n0 upgraded, 0 newly installed, 0 to remove and 6 not upgraded.\nReading package lists... Done\nBuilding dependency tree \nReading state information... Done\ngcc-5 is already the newest version (5.5.0-12ubuntu1).\nThe following packages were automatically installed and are no longer required:\n linux-headers-4.15.0-134 linux-headers-4.15.0-134-generic\nUse 'apt autoremove' to remove them.\n0 upgraded, 0 newly installed, 0 to remove and 6 not upgraded.\nHit:1 http://security.ubuntu.com/ubuntu bionic-security InRelease\nHit:2 https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/ InRelease\nIgn:3 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 InRelease\nHit:4 http://ppa.launchpad.net/c2d4u.team/c2d4u4.0+/ubuntu bionic InRelease\nHit:5 http://archive.ubuntu.com/ubuntu bionic InRelease\nIgn:6 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 InRelease\nHit:7 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 Release\nHit:8 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 Release\nHit:9 http://archive.ubuntu.com/ubuntu bionic-updates InRelease\nHit:10 http://ppa.launchpad.net/cran/libgit2/ubuntu bionic InRelease\nHit:11 http://archive.ubuntu.com/ubuntu bionic-backports InRelease\nHit:12 http://ppa.launchpad.net/graphics-drivers/ppa/ubuntu bionic InRelease\nReading package lists... Done\nBuilding dependency tree \nReading state information... Done\n6 packages can be upgraded. Run 'apt list --upgradable' to see them.\nReading package lists... Done\nBuilding dependency tree \nReading state information... Done\nCalculating upgrade... Done\nThe following packages were automatically installed and are no longer required:\n linux-headers-4.15.0-134 linux-headers-4.15.0-134-generic\nUse 'apt autoremove' to remove them.\nThe following packages have been kept back:\n libcublas-dev libcublas10 libcudnn7 libcudnn7-dev libnccl-dev libnccl2\n0 upgraded, 0 newly installed, 0 to remove and 6 not upgraded.\n" ], [ "!nvidia-smi", "Sat Jan 30 09:44:57 2021 \n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 460.39 Driver Version: 418.67 CUDA Version: 10.1 |\n|-------------------------------+----------------------+----------------------+\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n| | | MIG M. |\n|===============================+======================+======================|\n| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |\n| N/A 32C P8 9W / 70W | 0MiB / 15079MiB | 0% Default |\n| | | ERR! 
|\n+-------------------------------+----------------------+----------------------+\n \n+-----------------------------------------------------------------------------+\n| Processes: |\n| GPU GI CI PID Type Process name GPU Memory |\n| ID ID Usage |\n|=============================================================================|\n| No running processes found |\n+-----------------------------------------------------------------------------+\n" ], [ "!nvcc --version", "nvcc: NVIDIA (R) Cuda compiler driver\nCopyright (c) 2005-2019 NVIDIA Corporation\nBuilt on Sun_Jul_28_19:07:16_PDT_2019\nCuda compilation tools, release 10.1, V10.1.243\n" ], [ "%cd darknet\n!sed -i 's/OPENCV=1/OPENCV=0/g' Makefile\n!sed -i 's/GPU=0/GPU=1/g' Makefile\n!sed -i 's/CUDNN=0/CUDNN=1/g' Makefile\n!sed -i 's/CUDNN_HALF=0/CUDNN_HALF=1/g' Makefile\n\n!make", "[Errno 20] Not a directory: 'darknet'\n/content/drive/My Drive/darknet\nchmod +x *.sh\ng++ -std=c++11 -std=c++11 -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF -c ./src/image_opencv.cpp -o obj/image_opencv.o\ng++ -std=c++11 -std=c++11 -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF -c ./src/http_stream.cpp -o obj/http_stream.o\n\u001b[01m\u001b[K./src/http_stream.cpp:\u001b[m\u001b[K In member function ‘\u001b[01m\u001b[Kbool JSON_sender::write(const char*)\u001b[m\u001b[K’:\n\u001b[01m\u001b[K./src/http_stream.cpp:249:21:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kunused variable ‘\u001b[01m\u001b[Kn\u001b[m\u001b[K’ [\u001b[01;35m\u001b[K-Wunused-variable\u001b[m\u001b[K]\n int \u001b[01;35m\u001b[Kn\u001b[m\u001b[K = _write(client, outputbuf, outlen);\n \u001b[01;35m\u001b[K^\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/http_stream.cpp:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[Kvoid set_track_id(detection*, int, float, float, float, int, int, int)\u001b[m\u001b[K’:\n\u001b[01m\u001b[K./src/http_stream.cpp:863:27:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kcomparison between signed and unsigned integer expressions [\u001b[01;35m\u001b[K-Wsign-compare\u001b[m\u001b[K]\n for (int i = 0; \u001b[01;35m\u001b[Ki < v.size()\u001b[m\u001b[K; ++i) {\n \u001b[01;35m\u001b[K~~^~~~~~~~~~\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/http_stream.cpp:871:33:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kcomparison between signed and unsigned integer expressions [\u001b[01;35m\u001b[K-Wsign-compare\u001b[m\u001b[K]\n for (int old_id = 0; \u001b[01;35m\u001b[Kold_id < old_dets.size()\u001b[m\u001b[K; ++old_id) {\n \u001b[01;35m\u001b[K~~~~~~~^~~~~~~~~~~~~~~~~\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/http_stream.cpp:890:31:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kcomparison between signed and unsigned integer expressions [\u001b[01;35m\u001b[K-Wsign-compare\u001b[m\u001b[K]\n for (int index = 0; \u001b[01;35m\u001b[Kindex < new_dets_num*old_dets.size()\u001b[m\u001b[K; ++index) {\n \u001b[01;35m\u001b[K~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/http_stream.cpp:926:28:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kcomparison between signed and unsigned integer expressions [\u001b[01;35m\u001b[K-Wsign-compare\u001b[m\u001b[K]\n 
                     if (old_dets_dq.size() > deque_size) old_dets_dq.pop_front();

[... make output trimmed for readability. The remaining sources (gemm.c, utils.c, dark_cuda.c, convolutional_layer.c, blas.c, data.c, network.c, connected_layer.c, parser.c, detector.c, layer.c, classifier.c, shortcut_layer.c, region_layer.c, yolo_layer.c, gaussian_yolo_layer.c and the other layer/example files) are compiled one by one with the same flags (-DGPU -DCUDNN -DCUDNN_HALF -Ofast -fPIC); every step shown in the log emits only non-fatal compiler warnings, chiefly:
  - unused or set-but-unused local variables (-Wunused-variable, -Wunused-but-set-variable)
  - signed/unsigned comparisons (-Wsign-compare)
  - printf format specifiers such as %d/%Iu used for size_t / long unsigned arguments (-Wformat=)
  - enumeration values not handled in switch (-Wswitch) in activations.c and box.c
  - suggested parentheses around assignments and around '&&' within '||' (-Wparentheses)
  - float ** passed to cudaHostAlloc, which expects void ** (-Wincompatible-pointer-types), in network.c, parser.c, yolo_layer.c and gaussian_yolo_layer.c ...]

gcc -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF -c ./src/gaussian_yolo_layer.c -o obj/gaussian_yolo_layer.o
./src/gaussian_yolo_layer.c: In function ‘make_gaussian_yolo_layer’:
./src/gaussian_yolo_layer.c:78:38: warning: passing argument 1 of ‘cudaHostAlloc’ from incompatible pointer type [-Wincompatible-pointer-types]
     if (cudaSuccess == cudaHostAlloc(&l.delta, batch*l.outputs * sizeof(float), 
cudaHostRegisterMapped)) l.delta_pinned = 1;\n \u001b[01;35m\u001b[K^\u001b[m\u001b[K\nIn file included from \u001b[01m\u001b[K/usr/local/cuda/include/cuda_runtime.h:96:0\u001b[m\u001b[K,\n from \u001b[01m\u001b[Kinclude/darknet.h:41\u001b[m\u001b[K,\n from \u001b[01m\u001b[K./src/gaussian_yolo_layer.h:5\u001b[m\u001b[K,\n from \u001b[01m\u001b[K./src/gaussian_yolo_layer.c:7\u001b[m\u001b[K:\n\u001b[01m\u001b[K/usr/local/cuda/include/cuda_runtime_api.h:4391:39:\u001b[m\u001b[K \u001b[01;36m\u001b[Knote: \u001b[m\u001b[Kexpected ‘\u001b[01m\u001b[Kvoid **\u001b[m\u001b[K’ but argument is of type ‘\u001b[01m\u001b[Kfloat **\u001b[m\u001b[K’\n extern __host__ cudaError_t CUDARTAPI \u001b[01;36m\u001b[KcudaHostAlloc\u001b[m\u001b[K(void **pHost, size_t size, unsigned int flags);\n \u001b[01;36m\u001b[K^~~~~~~~~~~~~\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/gaussian_yolo_layer.c:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[Kresize_gaussian_yolo_layer\u001b[m\u001b[K’:\n\u001b[01m\u001b[K./src/gaussian_yolo_layer.c:110:42:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kpassing argument 1 of ‘\u001b[01m\u001b[KcudaHostAlloc\u001b[m\u001b[K’ from incompatible pointer type [\u001b[01;35m\u001b[K-Wincompatible-pointer-types\u001b[m\u001b[K]\n if (cudaSuccess != cudaHostAlloc(\u001b[01;35m\u001b[K&\u001b[m\u001b[Kl->output, l->batch*l->outputs * sizeof(float), cudaHostRegisterMapped)) {\n \u001b[01;35m\u001b[K^\u001b[m\u001b[K\nIn file included from \u001b[01m\u001b[K/usr/local/cuda/include/cuda_runtime.h:96:0\u001b[m\u001b[K,\n from \u001b[01m\u001b[Kinclude/darknet.h:41\u001b[m\u001b[K,\n from \u001b[01m\u001b[K./src/gaussian_yolo_layer.h:5\u001b[m\u001b[K,\n from \u001b[01m\u001b[K./src/gaussian_yolo_layer.c:7\u001b[m\u001b[K:\n\u001b[01m\u001b[K/usr/local/cuda/include/cuda_runtime_api.h:4391:39:\u001b[m\u001b[K \u001b[01;36m\u001b[Knote: \u001b[m\u001b[Kexpected ‘\u001b[01m\u001b[Kvoid **\u001b[m\u001b[K’ but argument is of type ‘\u001b[01m\u001b[Kfloat **\u001b[m\u001b[K’\n extern __host__ cudaError_t CUDARTAPI \u001b[01;36m\u001b[KcudaHostAlloc\u001b[m\u001b[K(void **pHost, size_t size, unsigned int flags);\n \u001b[01;36m\u001b[K^~~~~~~~~~~~~\u001b[m\u001b[K\n\u001b[01m\u001b[K./src/gaussian_yolo_layer.c:119:42:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kpassing argument 1 of ‘\u001b[01m\u001b[KcudaHostAlloc\u001b[m\u001b[K’ from incompatible pointer type [\u001b[01;35m\u001b[K-Wincompatible-pointer-types\u001b[m\u001b[K]\n if (cudaSuccess != cudaHostAlloc(\u001b[01;35m\u001b[K&\u001b[m\u001b[Kl->delta, l->batch*l->outputs * sizeof(float), cudaHostRegisterMapped)) {\n \u001b[01;35m\u001b[K^\u001b[m\u001b[K\nIn file included from \u001b[01m\u001b[K/usr/local/cuda/include/cuda_runtime.h:96:0\u001b[m\u001b[K,\n from \u001b[01m\u001b[Kinclude/darknet.h:41\u001b[m\u001b[K,\n from \u001b[01m\u001b[K./src/gaussian_yolo_layer.h:5\u001b[m\u001b[K,\n from \u001b[01m\u001b[K./src/gaussian_yolo_layer.c:7\u001b[m\u001b[K:\n\u001b[01m\u001b[K/usr/local/cuda/include/cuda_runtime_api.h:4391:39:\u001b[m\u001b[K \u001b[01;36m\u001b[Knote: \u001b[m\u001b[Kexpected ‘\u001b[01m\u001b[Kvoid **\u001b[m\u001b[K’ but argument is of type ‘\u001b[01m\u001b[Kfloat **\u001b[m\u001b[K’\n extern __host__ cudaError_t CUDARTAPI \u001b[01;36m\u001b[KcudaHostAlloc\u001b[m\u001b[K(void **pHost, size_t size, unsigned int flags);\n \u001b[01;36m\u001b[K^~~~~~~~~~~~~\u001b[m\u001b[K\ngcc -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors 
-Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF -c ./src/upsample_layer.c -o obj/upsample_layer.o\ngcc -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF -c ./src/lstm_layer.c -o obj/lstm_layer.o\ngcc -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF -c ./src/conv_lstm_layer.c -o obj/conv_lstm_layer.o\ngcc -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF -c ./src/scale_channels_layer.c -o obj/scale_channels_layer.o\ngcc -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF -c ./src/sam_layer.c -o obj/sam_layer.o\nnvcc -gencode arch=compute_35,code=sm_35 -gencode arch=compute_50,code=[sm_50,compute_50] -gencode arch=compute_52,code=[sm_52,compute_52] -gencode arch=compute_61,code=[sm_61,compute_61] -gencode arch=compute_70,code=[sm_70,compute_70] -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF --compiler-options \"-Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF\" -c ./src/convolutional_kernels.cu -o obj/convolutional_kernels.o\n\u001b[01m\u001b[K./src/convolutional_kernels.cu:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[Kvoid backward_convolutional_layer_gpu(convolutional_layer, network_state)\u001b[m\u001b[K’:\n\u001b[01m\u001b[K./src/convolutional_kernels.cu:853:40:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kcomparison between signed and unsigned integer expressions [\u001b[01;35m\u001b[K-Wsign-compare\u001b[m\u001b[K]\n \u001b[01;35m\u001b[K if (*state.net.max_output16_size < l.\u001b[m\u001b[Knweights) {\n \u001b[01;35m\u001b[K~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~\u001b[m\u001b[K\nnvcc -gencode arch=compute_35,code=sm_35 -gencode arch=compute_50,code=[sm_50,compute_50] -gencode arch=compute_52,code=[sm_52,compute_52] -gencode arch=compute_61,code=[sm_61,compute_61] -gencode arch=compute_70,code=[sm_70,compute_70] -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF --compiler-options \"-Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF\" -c ./src/activation_kernels.cu -o obj/activation_kernels.o\n./src/activation_kernels.cu(263): warning: variable \"MISH_THRESHOLD\" was declared but never referenced\n\n./src/activation_kernels.cu(263): warning: variable \"MISH_THRESHOLD\" was declared but never referenced\n\n./src/activation_kernels.cu(263): warning: variable \"MISH_THRESHOLD\" was declared but never referenced\n\n./src/activation_kernels.cu(263): warning: variable \"MISH_THRESHOLD\" was declared but never referenced\n\n./src/activation_kernels.cu(263): warning: variable \"MISH_THRESHOLD\" was declared but never referenced\n\nnvcc -gencode arch=compute_35,code=sm_35 -gencode arch=compute_50,code=[sm_50,compute_50] -gencode 
arch=compute_52,code=[sm_52,compute_52] -gencode arch=compute_61,code=[sm_61,compute_61] -gencode arch=compute_70,code=[sm_70,compute_70] -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF --compiler-options \"-Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF\" -c ./src/im2col_kernels.cu -o obj/im2col_kernels.o\nnvcc -gencode arch=compute_35,code=sm_35 -gencode arch=compute_50,code=[sm_50,compute_50] -gencode arch=compute_52,code=[sm_52,compute_52] -gencode arch=compute_61,code=[sm_61,compute_61] -gencode arch=compute_70,code=[sm_70,compute_70] -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF --compiler-options \"-Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF\" -c ./src/col2im_kernels.cu -o obj/col2im_kernels.o\nnvcc -gencode arch=compute_35,code=sm_35 -gencode arch=compute_50,code=[sm_50,compute_50] -gencode arch=compute_52,code=[sm_52,compute_52] -gencode arch=compute_61,code=[sm_61,compute_61] -gencode arch=compute_70,code=[sm_70,compute_70] -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF --compiler-options \"-Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF\" -c ./src/blas_kernels.cu -o obj/blas_kernels.o\n./src/blas_kernels.cu(1086): warning: variable \"out_index\" was declared but never referenced\n\n./src/blas_kernels.cu(1130): warning: variable \"step\" was set but never used\n\n./src/blas_kernels.cu(1736): warning: variable \"stage_id\" was declared but never referenced\n\n./src/blas_kernels.cu(1086): warning: variable \"out_index\" was declared but never referenced\n\n./src/blas_kernels.cu(1130): warning: variable \"step\" was set but never used\n\n./src/blas_kernels.cu(1736): warning: variable \"stage_id\" was declared but never referenced\n\n./src/blas_kernels.cu(1086): warning: variable \"out_index\" was declared but never referenced\n\n./src/blas_kernels.cu(1130): warning: variable \"step\" was set but never used\n\n./src/blas_kernels.cu(1736): warning: variable \"stage_id\" was declared but never referenced\n\n./src/blas_kernels.cu(1086): warning: variable \"out_index\" was declared but never referenced\n\n./src/blas_kernels.cu(1130): warning: variable \"step\" was set but never used\n\n./src/blas_kernels.cu(1736): warning: variable \"stage_id\" was declared but never referenced\n\n./src/blas_kernels.cu(1086): warning: variable \"out_index\" was declared but never referenced\n\n./src/blas_kernels.cu(1130): warning: variable \"step\" was set but never used\n\n./src/blas_kernels.cu(1736): warning: variable \"stage_id\" was declared but never referenced\n\n\u001b[01m\u001b[K./src/blas_kernels.cu:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[Kvoid backward_shortcut_multilayer_gpu(int, int, int, int*, float**, float*, float*, float*, float*, int, float*, float**, WEIGHTS_NORMALIZATION_T)\u001b[m\u001b[K’:\n\u001b[01m\u001b[K./src/blas_kernels.cu:1130:5:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kvariable ‘\u001b[01m\u001b[Kstep\u001b[m\u001b[K’ set but not used [\u001b[01;35m\u001b[K-Wunused-but-set-variable\u001b[m\u001b[K]\n \u001b[01;35m\u001b[Kint \u001b[m\u001b[Kstep = 0;\n \u001b[01;35m\u001b[K^~~~\u001b[m\u001b[K\nnvcc -gencode arch=compute_35,code=sm_35 -gencode arch=compute_50,code=[sm_50,compute_50] 
-gencode arch=compute_52,code=[sm_52,compute_52] -gencode arch=compute_61,code=[sm_61,compute_61] -gencode arch=compute_70,code=[sm_70,compute_70] -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF --compiler-options \"-Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF\" -c ./src/crop_layer_kernels.cu -o obj/crop_layer_kernels.o\nnvcc -gencode arch=compute_35,code=sm_35 -gencode arch=compute_50,code=[sm_50,compute_50] -gencode arch=compute_52,code=[sm_52,compute_52] -gencode arch=compute_61,code=[sm_61,compute_61] -gencode arch=compute_70,code=[sm_70,compute_70] -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF --compiler-options \"-Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF\" -c ./src/dropout_layer_kernels.cu -o obj/dropout_layer_kernels.o\n./src/dropout_layer_kernels.cu(140): warning: variable \"cur_scale\" was declared but never referenced\n\n./src/dropout_layer_kernels.cu(245): warning: variable \"cur_scale\" was declared but never referenced\n\n./src/dropout_layer_kernels.cu(262): warning: variable \"block_prob\" was declared but never referenced\n\n./src/dropout_layer_kernels.cu(140): warning: variable \"cur_scale\" was declared but never referenced\n\n./src/dropout_layer_kernels.cu(245): warning: variable \"cur_scale\" was declared but never referenced\n\n./src/dropout_layer_kernels.cu(262): warning: variable \"block_prob\" was declared but never referenced\n\n./src/dropout_layer_kernels.cu(140): warning: variable \"cur_scale\" was declared but never referenced\n\n./src/dropout_layer_kernels.cu(245): warning: variable \"cur_scale\" was declared but never referenced\n\n./src/dropout_layer_kernels.cu(262): warning: variable \"block_prob\" was declared but never referenced\n\n./src/dropout_layer_kernels.cu(140): warning: variable \"cur_scale\" was declared but never referenced\n\n./src/dropout_layer_kernels.cu(245): warning: variable \"cur_scale\" was declared but never referenced\n\n./src/dropout_layer_kernels.cu(262): warning: variable \"block_prob\" was declared but never referenced\n\n./src/dropout_layer_kernels.cu(140): warning: variable \"cur_scale\" was declared but never referenced\n\n./src/dropout_layer_kernels.cu(245): warning: variable \"cur_scale\" was declared but never referenced\n\n./src/dropout_layer_kernels.cu(262): warning: variable \"block_prob\" was declared but never referenced\n\nnvcc -gencode arch=compute_35,code=sm_35 -gencode arch=compute_50,code=[sm_50,compute_50] -gencode arch=compute_52,code=[sm_52,compute_52] -gencode arch=compute_61,code=[sm_61,compute_61] -gencode arch=compute_70,code=[sm_70,compute_70] -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF --compiler-options \"-Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF\" -c ./src/maxpool_layer_kernels.cu -o obj/maxpool_layer_kernels.o\nnvcc -gencode arch=compute_35,code=sm_35 -gencode arch=compute_50,code=[sm_50,compute_50] -gencode arch=compute_52,code=[sm_52,compute_52] -gencode arch=compute_61,code=[sm_61,compute_61] -gencode arch=compute_70,code=[sm_70,compute_70] -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF --compiler-options \"-Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU 
-DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF\" -c ./src/network_kernels.cu -o obj/network_kernels.o\n./src/network_kernels.cu(364): warning: variable \"l\" was declared but never referenced\n\n./src/network_kernels.cu(364): warning: variable \"l\" was declared but never referenced\n\n./src/network_kernels.cu(364): warning: variable \"l\" was declared but never referenced\n\n./src/network_kernels.cu(364): warning: variable \"l\" was declared but never referenced\n\n./src/network_kernels.cu(364): warning: variable \"l\" was declared but never referenced\n\n\u001b[01m\u001b[K./src/network_kernels.cu:\u001b[m\u001b[K In function ‘\u001b[01m\u001b[Kfloat train_network_datum_gpu(network, float*, float*)\u001b[m\u001b[K’:\n\u001b[01m\u001b[K./src/network_kernels.cu:364:7:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[Kvariable ‘\u001b[01m\u001b[Kl\u001b[m\u001b[K’ set but not used [\u001b[01;35m\u001b[K-Wunused-but-set-variable\u001b[m\u001b[K]\n \u001b[01;35m\u001b[K \u001b[m\u001b[K layer l = net.layers[net.n - 1];\n \u001b[01;35m\u001b[K^\u001b[m\u001b[K\nnvcc -gencode arch=compute_35,code=sm_35 -gencode arch=compute_50,code=[sm_50,compute_50] -gencode arch=compute_52,code=[sm_52,compute_52] -gencode arch=compute_61,code=[sm_61,compute_61] -gencode arch=compute_70,code=[sm_70,compute_70] -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF --compiler-options \"-Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF\" -c ./src/avgpool_layer_kernels.cu -o obj/avgpool_layer_kernels.o\ng++ -std=c++11 -std=c++11 -Iinclude/ -I3rdparty/stb/include -DGPU -I/usr/local/cuda/include/ -DCUDNN -DCUDNN_HALF -Wall -Wfatal-errors -Wno-unused-result -Wno-unknown-pragmas -fPIC -Ofast -DGPU -DCUDNN -I/usr/local/cudnn/include -DCUDNN_HALF obj/image_opencv.o obj/http_stream.o obj/gemm.o obj/utils.o obj/dark_cuda.o obj/convolutional_layer.o obj/list.o obj/image.o obj/activations.o obj/im2col.o obj/col2im.o obj/blas.o obj/crop_layer.o obj/dropout_layer.o obj/maxpool_layer.o obj/softmax_layer.o obj/data.o obj/matrix.o obj/network.o obj/connected_layer.o obj/cost_layer.o obj/parser.o obj/option_list.o obj/darknet.o obj/detection_layer.o obj/captcha.o obj/route_layer.o obj/writing.o obj/box.o obj/nightmare.o obj/normalization_layer.o obj/avgpool_layer.o obj/coco.o obj/dice.o obj/yolo.o obj/detector.o obj/layer.o obj/compare.o obj/classifier.o obj/local_layer.o obj/swag.o obj/shortcut_layer.o obj/activation_layer.o obj/rnn_layer.o obj/gru_layer.o obj/rnn.o obj/rnn_vid.o obj/crnn_layer.o obj/demo.o obj/tag.o obj/cifar.o obj/go.o obj/batchnorm_layer.o obj/art.o obj/region_layer.o obj/reorg_layer.o obj/reorg_old_layer.o obj/super.o obj/voxel.o obj/tree.o obj/yolo_layer.o obj/gaussian_yolo_layer.o obj/upsample_layer.o obj/lstm_layer.o obj/conv_lstm_layer.o obj/scale_channels_layer.o obj/sam_layer.o obj/convolutional_kernels.o obj/activation_kernels.o obj/im2col_kernels.o obj/col2im_kernels.o obj/blas_kernels.o obj/crop_layer_kernels.o obj/dropout_layer_kernels.o obj/maxpool_layer_kernels.o obj/network_kernels.o obj/avgpool_layer_kernels.o -o darknet -lm -pthread -L/usr/local/cuda/lib64 -lcuda -lcudart -lcublas -lcurand -L/usr/local/cudnn/lib64 -lcudnn -lstdc++\n" ] ], [ [ "#### 3. 
Testing the validate dataset using YOLOv3 with pre-trained weights and storing the coordinates in a json file", "_____no_output_____" ] ], [ [ "!./darknet detector test data/obj.data yolo-obj.cfg yolo-obj_final_vehicle.weights -ext_output -dont_show -out result_vehicle_plates.json < data/valid.txt", " CUDA-version: 10010 (10010), cuDNN: 7.6.5, CUDNN_HALF=1, GPU count: 1 \n CUDNN_HALF=1 \n OpenCV isn't used - data augmentation will be slow \n 0 : compute_capability = 750, cudnn_half = 1, GPU: Tesla T4 \nnet.optimized_memory = 0 \nmini_batch = 1, batch = 64, time_steps = 1, train = 0 \n layer filters size/strd(dil) input output\n 0 conv 32 3 x 3/ 1 608 x 608 x 3 -> 608 x 608 x 32 0.639 BF\n 1 conv 64 3 x 3/ 2 608 x 608 x 32 -> 304 x 304 x 64 3.407 BF\n 2 conv 32 1 x 1/ 1 304 x 304 x 64 -> 304 x 304 x 32 0.379 BF\n 3 conv 64 3 x 3/ 1 304 x 304 x 32 -> 304 x 304 x 64 3.407 BF\n 4 Shortcut Layer: 1, wt = 0, wn = 0, outputs: 304 x 304 x 64 0.006 BF\n 5 conv 128 3 x 3/ 2 304 x 304 x 64 -> 152 x 152 x 128 3.407 BF\n 6 conv 64 1 x 1/ 1 152 x 152 x 128 -> 152 x 152 x 64 0.379 BF\n 7 conv 128 3 x 3/ 1 152 x 152 x 64 -> 152 x 152 x 128 3.407 BF\n 8 Shortcut Layer: 5, wt = 0, wn = 0, outputs: 152 x 152 x 128 0.003 BF\n 9 conv 64 1 x 1/ 1 152 x 152 x 128 -> 152 x 152 x 64 0.379 BF\n 10 conv 128 3 x 3/ 1 152 x 152 x 64 -> 152 x 152 x 128 3.407 BF\n 11 Shortcut Layer: 8, wt = 0, wn = 0, outputs: 152 x 152 x 128 0.003 BF\n 12 conv 256 3 x 3/ 2 152 x 152 x 128 -> 76 x 76 x 256 3.407 BF\n 13 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 14 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 15 Shortcut Layer: 12, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 16 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 17 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 18 Shortcut Layer: 15, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 19 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 20 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 21 Shortcut Layer: 18, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 22 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 23 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 24 Shortcut Layer: 21, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 25 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 26 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 27 Shortcut Layer: 24, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 28 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 29 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 30 Shortcut Layer: 27, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 31 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 32 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 33 Shortcut Layer: 30, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 34 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 35 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 36 Shortcut Layer: 33, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 37 conv 512 3 x 3/ 2 76 x 76 x 256 -> 38 x 38 x 512 3.407 BF\n 38 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 39 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 40 Shortcut Layer: 37, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 41 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 42 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 43 Shortcut Layer: 40, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 44 
conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 45 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 46 Shortcut Layer: 43, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 47 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 48 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 49 Shortcut Layer: 46, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 50 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 51 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 52 Shortcut Layer: 49, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 53 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 54 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 55 Shortcut Layer: 52, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 56 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 57 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 58 Shortcut Layer: 55, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 59 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 60 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 61 Shortcut Layer: 58, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 62 conv 1024 3 x 3/ 2 38 x 38 x 512 -> 19 x 19 x1024 3.407 BF\n 63 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 64 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 65 Shortcut Layer: 62, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 66 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 67 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 68 Shortcut Layer: 65, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 69 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 70 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 71 Shortcut Layer: 68, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 72 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 73 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 74 Shortcut Layer: 71, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 75 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 76 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 77 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 78 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 79 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 80 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 81 conv 18 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 18 0.013 BF\n 82 yolo\n[yolo] params: iou loss: mse (2), iou_norm: 0.75, obj_norm: 1.00, cls_norm: 1.00, delta_norm: 1.00, scale_x_y: 1.00\n 83 route 79 \t\t -> 19 x 19 x 512 \n 84 conv 256 1 x 1/ 1 19 x 19 x 512 -> 19 x 19 x 256 0.095 BF\n 85 upsample 2x 19 x 19 x 256 -> 38 x 38 x 256\n 86 route 85 61 \t -> 38 x 38 x 768 \n 87 conv 256 1 x 1/ 1 38 x 38 x 768 -> 38 x 38 x 256 0.568 BF\n 88 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 89 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 90 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 91 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 92 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 93 conv 18 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 18 0.027 BF\n 94 yolo\n[yolo] params: iou loss: mse (2), iou_norm: 0.75, obj_norm: 1.00, cls_norm: 1.00, delta_norm: 1.00, scale_x_y: 1.00\n 95 route 91 \t\t -> 38 x 38 x 256 \n 96 conv 128 1 x 1/ 1 38 x 38 x 256 -> 38 x 38 x 128 0.095 BF\n 97 upsample 2x 38 x 38 x 128 -> 76 x 76 x 128\n 98 route 97 36 \t -> 76 x 76 x 
384 \n 99 conv 128 1 x 1/ 1 76 x 76 x 384 -> 76 x 76 x 128 0.568 BF\n 100 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 101 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 102 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 103 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 104 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 105 conv 18 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 18 0.053 BF\n 106 yolo\n[yolo] params: iou loss: mse (2), iou_norm: 0.75, obj_norm: 1.00, cls_norm: 1.00, delta_norm: 1.00, scale_x_y: 1.00\nTotal BFLOPS 139.496 \navg_outputs = 1103769 \n Allocate additional workspace_size = 52.43 MB \nLoading weights from yolo-obj_final_vehicle.weights...\n seen 64, trained: 128 K-images (2 Kilo-batches_64) \nDone! Loaded 107 layers from weights-file \nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/0.jpg: Predicted in 43.756000 milli-seconds.\nNumberPlate: 47%\t(left_x: 1363 top_y: 1941 width: 273 height: 134)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/1.jpg: Predicted in 43.664000 milli-seconds.\nNumberPlate: 93%\t(left_x: 688 top_y: 1955 width: 479 height: 171)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/10.jpg: Predicted in 43.720000 milli-seconds.\nNumberPlate: 100%\t(left_x: 551 top_y: 562 width: 600 height: 179)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/100.jpg: Predicted in 43.562000 milli-seconds.\nNumberPlate: 95%\t(left_x: 352 top_y: 334 width: 226 height: 46)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/101.jpg: Predicted in 43.773000 milli-seconds.\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/102.jpg: Predicted in 43.590000 milli-seconds.\nNumberPlate: 100%\t(left_x: 306 top_y: 515 width: 269 height: 66)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/103.jpg: Predicted in 43.588000 milli-seconds.\nNumberPlate: 97%\t(left_x: 302 top_y: 527 width: 248 height: 53)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/104.jpg: Predicted in 41.227000 milli-seconds.\nNumberPlate: 99%\t(left_x: 392 top_y: 527 width: 225 height: 59)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/105.jpg: Predicted in 41.142000 milli-seconds.\nNumberPlate: 99%\t(left_x: 96 top_y: 339 width: 140 height: 106)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/106.jpg: Predicted in 41.138000 milli-seconds.\nNumberPlate: 99%\t(left_x: 336 top_y: 384 width: 226 height: 60)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/107.jpg: Predicted in 41.158000 milli-seconds.\nNumberPlate: 70%\t(left_x: 710 top_y: 344 width: 150 height: 73)\nNumberPlate: 99%\t(left_x: 756 top_y: 344 width: 60 height: 74)\nEnter 
Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/108.jpg: Predicted in 41.261000 milli-seconds.\nNumberPlate: 100%\t(left_x: 361 top_y: 499 width: 263 height: 87)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/109.jpg: Predicted in 41.168000 milli-seconds.\nNumberPlate: 97%\t(left_x: 51 top_y: 386 width: 92 height: 86)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/11.jpg: Predicted in 41.070000 milli-seconds.\nNumberPlate: 99%\t(left_x: 359 top_y: 267 width: 135 height: 34)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/110.jpg: Predicted in 41.182000 milli-seconds.\nNumberPlate: 100%\t(left_x: 275 top_y: 512 width: 313 height: 64)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/111.jpg: Predicted in 41.130000 milli-seconds.\nNumberPlate: 100%\t(left_x: 671 top_y: 446 width: 116 height: 84)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/112.jpg: Predicted in 41.118000 milli-seconds.\nNumberPlate: 94%\t(left_x: 339 top_y: 547 width: 217 height: 57)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/113.jpg: Predicted in 42.100000 milli-seconds.\nNumberPlate: 100%\t(left_x: 137 top_y: 316 width: 120 height: 89)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/114.jpg: Predicted in 41.142000 milli-seconds.\nNumberPlate: 96%\t(left_x: 424 top_y: 383 width: 194 height: 64)\nNumberPlate: 73%\t(left_x: 600 top_y: 710 width: 126 height: 28)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/115.jpg: Predicted in 40.456000 milli-seconds.\nNumberPlate: 99%\t(left_x: 118 top_y: 349 width: 135 height: 84)\nNumberPlate: 46%\t(left_x: 609 top_y: 711 width: 108 height: 25)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/116.jpg: Predicted in 40.449000 milli-seconds.\nNumberPlate: 78%\t(left_x: 609 top_y: 710 width: 105 height: 30)\nNumberPlate: 89%\t(left_x: 718 top_y: 338 width: 70 height: 66)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/117.jpg: Predicted in 39.271000 milli-seconds.\nNumberPlate: 99%\t(left_x: 391 top_y: 489 width: 231 height: 60)\nNumberPlate: 77%\t(left_x: 609 top_y: 711 width: 106 height: 27)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/118.jpg: Predicted in 39.309000 milli-seconds.\nNumberPlate: 100%\t(left_x: 71 top_y: 348 width: 101 height: 80)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/119.jpg: Predicted in 39.129000 milli-seconds.\nNumberPlate: 100%\t(left_x: 311 top_y: 373 width: 263 height: 59)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 
\n Detection layer: 106 - type = 28 \ndata/img2/12.jpg: Predicted in 39.141000 milli-seconds.\nNumberPlate: 99%\t(left_x: 761 top_y: 496 width: 131 height: 29)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/120.jpg: Predicted in 39.049000 milli-seconds.\nNumberPlate: 100%\t(left_x: 682 top_y: 369 width: 100 height: 76)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/121.jpg: Predicted in 39.123000 milli-seconds.\nNumberPlate: 100%\t(left_x: 326 top_y: 527 width: 243 height: 56)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/122.jpg: Predicted in 39.122000 milli-seconds.\nNumberPlate: 100%\t(left_x: 99 top_y: 445 width: 113 height: 80)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/123.jpg: Predicted in 39.045000 milli-seconds.\nNumberPlate: 100%\t(left_x: 313 top_y: 514 width: 281 height: 67)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/124.jpg: Predicted in 39.159000 milli-seconds.\nNumberPlate: 99%\t(left_x: 320 top_y: 363 width: 289 height: 53)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/125.jpg: Predicted in 39.157000 milli-seconds.\nNumberPlate: 100%\t(left_x: 723 top_y: 444 width: 93 height: 81)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/126.jpg: Predicted in 39.139000 milli-seconds.\nNumberPlate: 97%\t(left_x: 392 top_y: 517 width: 198 height: 57)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/127.jpg: Predicted in 39.042000 milli-seconds.\nNumberPlate: 99%\t(left_x: 132 top_y: 388 width: 113 height: 71)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/128.jpg: Predicted in 39.142000 milli-seconds.\nNumberPlate: 100%\t(left_x: 405 top_y: 436 width: 172 height: 40)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/129.jpg: Predicted in 38.025000 milli-seconds.\nNumberPlate: 99%\t(left_x: 595 top_y: 431 width: 122 height: 73)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/13.jpg: Predicted in 37.835000 milli-seconds.\nNumberPlate: 100%\t(left_x: 406 top_y: 185 width: 138 height: 65)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/130.jpg: Predicted in 37.835000 milli-seconds.\nNumberPlate: 100%\t(left_x: 300 top_y: 526 width: 252 height: 57)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/131.jpg: Predicted in 31.532000 milli-seconds.\nNumberPlate: 98%\t(left_x: 295 top_y: 380 width: 256 height: 56)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/132.jpg: Predicted in 31.578000 
milli-seconds.\nNumberPlate: 100%\t(left_x: 725 top_y: 348 width: 104 height: 69)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/133.jpg: Predicted in 31.475000 milli-seconds.\nNumberPlate: 99%\t(left_x: 317 top_y: 467 width: 307 height: 75)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/134.jpg: Predicted in 31.526000 milli-seconds.\nNumberPlate: 100%\t(left_x: 323 top_y: 494 width: 213 height: 46)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/135.jpg: Predicted in 31.633000 milli-seconds.\nNumberPlate: 100%\t(left_x: 798 top_y: 325 width: 93 height: 67)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/136.jpg: Predicted in 31.701000 milli-seconds.\nNumberPlate: 100%\t(left_x: 324 top_y: 461 width: 227 height: 58)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/137.jpg: Predicted in 31.540000 milli-seconds.\nNumberPlate: 100%\t(left_x: 188 top_y: 475 width: 156 height: 60)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/138.jpg: Predicted in 31.498000 milli-seconds.\nNumberPlate: 100%\t(left_x: 723 top_y: 361 width: 99 height: 48)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/139.jpg: Predicted in 31.624000 milli-seconds.\nNumberPlate: 100%\t(left_x: 61 top_y: 469 width: 114 height: 49)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/14.jpg: Predicted in 31.578000 milli-seconds.\nNumberPlate: 100%\t(left_x: 236 top_y: 194 width: 148 height: 36)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/140.jpg: Predicted in 31.675000 milli-seconds.\nNumberPlate: 84%\t(left_x: 586 top_y: 450 width: 106 height: 37)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/141.jpg: Predicted in 31.584000 milli-seconds.\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/15.jpg: Predicted in 31.562000 milli-seconds.\nNumberPlate: 100%\t(left_x: 322 top_y: 299 width: 128 height: 24)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/16.jpg: Predicted in 31.505000 milli-seconds.\nNumberPlate: 99%\t(left_x: 332 top_y: 253 width: 138 height: 31)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/17.jpg: Predicted in 31.473000 milli-seconds.\nNumberPlate: 100%\t(left_x: 251 top_y: 265 width: 135 height: 27)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/18.jpg: Predicted in 31.584000 milli-seconds.\nNumberPlate: 99%\t(left_x: 299 top_y: 213 width: 70 height: 19)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type 
= 28 \n Detection layer: 106 - type = 28 \ndata/img2/19.jpg: Predicted in 31.466000 milli-seconds.\nNumberPlate: 100%\t(left_x: 216 top_y: 322 width: 184 height: 36)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/2.jpg: Predicted in 26.896000 milli-seconds.\nNumberPlate: 76%\t(left_x: 1524 top_y: 3102 width: 821 height: 321)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/20.jpg: Predicted in 26.679000 milli-seconds.\nNumberPlate: 100%\t(left_x: 400 top_y: 148 width: 98 height: 46)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/21.jpg: Predicted in 26.718000 milli-seconds.\nNumberPlate: 100%\t(left_x: 504 top_y: 375 width: 90 height: 46)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/22.jpg: Predicted in 26.602000 milli-seconds.\nNumberPlate: 45%\t(left_x: 503 top_y: 171 width: 39 height: 167)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/23.jpg: Predicted in 26.684000 milli-seconds.\nNumberPlate: 99%\t(left_x: 266 top_y: 417 width: 179 height: 33)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/24.jpg: Predicted in 26.248000 milli-seconds.\nNumberPlate: 100%\t(left_x: 424 top_y: 347 width: 149 height: 75)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/25.jpg: Predicted in 24.515000 milli-seconds.\nNumberPlate: 100%\t(left_x: 244 top_y: 406 width: 157 height: 35)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/26.jpg: Predicted in 24.447000 milli-seconds.\nNumberPlate: 100%\t(left_x: 263 top_y: 367 width: 154 height: 41)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/27.jpg: Predicted in 24.339000 milli-seconds.\nNumberPlate: 100%\t(left_x: 271 top_y: 265 width: 136 height: 33)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/28.jpg: Predicted in 24.249000 milli-seconds.\nNumberPlate: 98%\t(left_x: 193 top_y: 185 width: 98 height: 36)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/29.jpg: Predicted in 24.218000 milli-seconds.\nNumberPlate: 100%\t(left_x: 528 top_y: 222 width: 91 height: 56)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/3.jpg: Predicted in 24.533000 milli-seconds.\nNumberPlate: 99%\t(left_x: 1446 top_y: 3469 width: 689 height: 189)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/30.jpg: Predicted in 24.510000 milli-seconds.\nNumberPlate: 100%\t(left_x: 68 top_y: 255 width: 90 height: 67)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/31.jpg: Predicted in 24.356000 
milli-seconds.\nNumberPlate: 98%\t(left_x: 282 top_y: 364 width: 130 height: 31)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/32.jpg: Predicted in 24.351000 milli-seconds.\nNumberPlate: 100%\t(left_x: 149 top_y: 367 width: 159 height: 58)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/33.jpg: Predicted in 24.330000 milli-seconds.\nNumberPlate: 100%\t(left_x: 263 top_y: 355 width: 130 height: 29)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/34.jpg: Predicted in 24.342000 milli-seconds.\nNumberPlate: 100%\t(left_x: 266 top_y: 371 width: 157 height: 38)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/35.jpg: Predicted in 24.262000 milli-seconds.\nNumberPlate: 100%\t(left_x: 261 top_y: 288 width: 185 height: 43)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/36.jpg: Predicted in 24.327000 milli-seconds.\nNumberPlate: 91%\t(left_x: 259 top_y: 251 width: 143 height: 29)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/37.jpg: Predicted in 24.013000 milli-seconds.\nNumberPlate: 100%\t(left_x: 328 top_y: 212 width: 78 height: 22)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/38.jpg: Predicted in 23.501000 milli-seconds.\nNumberPlate: 100%\t(left_x: 266 top_y: 299 width: 100 height: 23)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/39.jpg: Predicted in 23.327000 milli-seconds.\nNumberPlate: 100%\t(left_x: 225 top_y: 375 width: 206 height: 56)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/4.jpg: Predicted in 23.513000 milli-seconds.\nNumberPlate: 100%\t(left_x: 623 top_y: 292 width: 169 height: 41)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/40.jpg: Predicted in 23.447000 milli-seconds.\nNumberPlate: 96%\t(left_x: 330 top_y: 329 width: 100 height: 19)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/41.jpg: Predicted in 23.386000 milli-seconds.\nNumberPlate: 97%\t(left_x: 227 top_y: 286 width: 123 height: 31)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/42.jpg: Predicted in 23.428000 milli-seconds.\nNumberPlate: 98%\t(left_x: 142 top_y: 346 width: 147 height: 50)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/43.jpg: Predicted in 23.236000 milli-seconds.\nNumberPlate: 100%\t(left_x: 279 top_y: 298 width: 136 height: 33)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/44.jpg: Predicted in 23.290000 milli-seconds.\nNumberPlate: 100%\t(left_x: 174 top_y: 290 width: 121 height: 35)\nEnter Image Path: 
Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/45.jpg: Predicted in 23.366000 milli-seconds.\nNumberPlate: 100%\t(left_x: 290 top_y: 282 width: 123 height: 26)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/46.jpg: Predicted in 23.392000 milli-seconds.\nNumberPlate: 99%\t(left_x: 283 top_y: 292 width: 106 height: 20)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/47.jpg: Predicted in 22.874000 milli-seconds.\nNumberPlate: 100%\t(left_x: 404 top_y: 257 width: 124 height: 34)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/48.jpg: Predicted in 22.897000 milli-seconds.\nNumberPlate: 98%\t(left_x: 51 top_y: 263 width: 115 height: 79)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/49.jpg: Predicted in 23.198000 milli-seconds.\nNumberPlate: 100%\t(left_x: 166 top_y: 414 width: 99 height: 49)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/5.jpg: Predicted in 23.057000 milli-seconds.\nNumberPlate: 99%\t(left_x: 1161 top_y: 2066 width: 652 height: 226)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/50.jpg: Predicted in 22.874000 milli-seconds.\nNumberPlate: 100%\t(left_x: 284 top_y: 237 width: 117 height: 26)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/51.jpg: Predicted in 23.020000 milli-seconds.\nNumberPlate: 100%\t(left_x: 286 top_y: 363 width: 173 height: 52)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/52.jpg: Predicted in 22.840000 milli-seconds.\nNumberPlate: 99%\t(left_x: 411 top_y: 295 width: 110 height: 56)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/53.jpg: Predicted in 23.024000 milli-seconds.\nNumberPlate: 99%\t(left_x: 260 top_y: 369 width: 160 height: 37)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/54.jpg: Predicted in 23.127000 milli-seconds.\nNumberPlate: 97%\t(left_x: 181 top_y: 244 width: 76 height: 39)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/55.jpg: Predicted in 23.226000 milli-seconds.\nNumberPlate: 100%\t(left_x: 513 top_y: 287 width: 75 height: 49)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/56.jpg: Predicted in 23.077000 milli-seconds.\nNumberPlate: 100%\t(left_x: 259 top_y: 407 width: 202 height: 49)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/57.jpg: Predicted in 23.043000 milli-seconds.\nNumberPlate: 100%\t(left_x: 116 top_y: 285 width: 113 height: 60)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 
\ndata/img2/58.jpg: Predicted in 22.842000 milli-seconds.\nNumberPlate: 100%\t(left_x: 216 top_y: 366 width: 178 height: 44)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/59.jpg: Predicted in 23.095000 milli-seconds.\nNumberPlate: 100%\t(left_x: 261 top_y: 316 width: 114 height: 22)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/6.jpg: Predicted in 23.324000 milli-seconds.\nNumberPlate: 94%\t(left_x: 413 top_y: 621 width: 193 height: 58)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/60.jpg: Predicted in 23.057000 milli-seconds.\nNumberPlate: 100%\t(left_x: 90 top_y: 323 width: 152 height: 64)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/61.jpg: Predicted in 23.257000 milli-seconds.\nNumberPlate: 100%\t(left_x: 167 top_y: 135 width: 72 height: 46)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/62.jpg: Predicted in 23.212000 milli-seconds.\nNumberPlate: 100%\t(left_x: 360 top_y: 346 width: 119 height: 34)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/63.jpg: Predicted in 22.890000 milli-seconds.\nNumberPlate: 100%\t(left_x: 263 top_y: 225 width: 118 height: 27)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/64.jpg: Predicted in 23.065000 milli-seconds.\nNumberPlate: 100%\t(left_x: 233 top_y: 226 width: 129 height: 32)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/65.jpg: Predicted in 23.185000 milli-seconds.\nNumberPlate: 98%\t(left_x: 236 top_y: 219 width: 133 height: 31)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/66.jpg: Predicted in 23.361000 milli-seconds.\nNumberPlate: 100%\t(left_x: 181 top_y: 265 width: 126 height: 29)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/67.jpg: Predicted in 23.287000 milli-seconds.\nNumberPlate: 100%\t(left_x: 530 top_y: 304 width: 63 height: 38)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/68.jpg: Predicted in 23.133000 milli-seconds.\nNumberPlate: 100%\t(left_x: 266 top_y: 306 width: 100 height: 25)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/69.jpg: Predicted in 23.173000 milli-seconds.\nNumberPlate: 100%\t(left_x: 434 top_y: 153 width: 97 height: 38)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/7.jpg: Predicted in 23.266000 milli-seconds.\nNumberPlate: 93%\t(left_x: 1148 top_y: 1638 width: 513 height: 156)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/70.jpg: Predicted in 23.077000 milli-seconds.\nNumberPlate: 100%\t(left_x: 501 top_y: 334 
width: 131 height: 82)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/71.jpg: Predicted in 23.184000 milli-seconds.\nNumberPlate: 100%\t(left_x: 205 top_y: 358 width: 209 height: 47)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/72.jpg: Predicted in 23.124000 milli-seconds.\nNumberPlate: 98%\t(left_x: 254 top_y: 213 width: 141 height: 36)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/73.jpg: Predicted in 23.025000 milli-seconds.\nNumberPlate: 99%\t(left_x: 114 top_y: 362 width: 141 height: 40)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/74.jpg: Predicted in 23.070000 milli-seconds.\nNumberPlate: 100%\t(left_x: 245 top_y: 226 width: 92 height: 23)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/75.jpg: Predicted in 23.198000 milli-seconds.\nNumberPlate: 99%\t(left_x: 540 top_y: 262 width: 113 height: 47)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/76.jpg: Predicted in 23.083000 milli-seconds.\nNumberPlate: 100%\t(left_x: 430 top_y: 183 width: 65 height: 35)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/77.jpg: Predicted in 23.032000 milli-seconds.\nNumberPlate: 100%\t(left_x: 259 top_y: 241 width: 117 height: 23)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/78.jpg: Predicted in 23.278000 milli-seconds.\nNumberPlate: 100%\t(left_x: 201 top_y: 393 width: 188 height: 38)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/79.jpg: Predicted in 23.052000 milli-seconds.\nNumberPlate: 100%\t(left_x: 264 top_y: 299 width: 193 height: 41)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/8.jpg: Predicted in 23.146000 milli-seconds.\nNumberPlate: 70%\t(left_x: 1694 top_y: 1830 width: 236 height: 183)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/80.jpg: Predicted in 22.855000 milli-seconds.\nNumberPlate: 100%\t(left_x: 305 top_y: 291 width: 122 height: 36)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/81.jpg: Predicted in 22.928000 milli-seconds.\nNumberPlate: 100%\t(left_x: 218 top_y: 307 width: 96 height: 30)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/82.jpg: Predicted in 22.864000 milli-seconds.\nNumberPlate: 100%\t(left_x: 278 top_y: 269 width: 151 height: 38)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/83.jpg: Predicted in 22.877000 milli-seconds.\nNumberPlate: 100%\t(left_x: 280 top_y: 330 width: 105 height: 25)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - 
type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/84.jpg: Predicted in 22.898000 milli-seconds.\nNumberPlate: 100%\t(left_x: 275 top_y: 342 width: 113 height: 24)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/85.jpg: Predicted in 22.968000 milli-seconds.\nNumberPlate: 99%\t(left_x: 458 top_y: 275 width: 66 height: 40)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/86.jpg: Predicted in 23.143000 milli-seconds.\nNumberPlate: 100%\t(left_x: 280 top_y: 327 width: 145 height: 30)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/87.jpg: Predicted in 23.188000 milli-seconds.\nNumberPlate: 100%\t(left_x: 369 top_y: 165 width: 90 height: 30)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/88.jpg: Predicted in 22.769000 milli-seconds.\nNumberPlate: 100%\t(left_x: 368 top_y: 290 width: 119 height: 27)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/89.jpg: Predicted in 23.312000 milli-seconds.\nNumberPlate: 99%\t(left_x: 268 top_y: 413 width: 189 height: 49)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/9.jpg: Predicted in 23.059000 milli-seconds.\nNumberPlate: 100%\t(left_x: 370 top_y: 239 width: 137 height: 28)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/90.jpg: Predicted in 23.094000 milli-seconds.\nNumberPlate: 98%\t(left_x: 166 top_y: 261 width: 99 height: 44)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/91.jpg: Predicted in 22.881000 milli-seconds.\nNumberPlate: 85%\t(left_x: 217 top_y: 294 width: 48 height: 19)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/92.jpg: Predicted in 23.330000 milli-seconds.\nNumberPlate: 100%\t(left_x: 311 top_y: 348 width: 146 height: 45)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/93.jpg: Predicted in 23.076000 milli-seconds.\nNumberPlate: 97%\t(left_x: 178 top_y: 342 width: 116 height: 41)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/94.jpg: Predicted in 22.949000 milli-seconds.\nNumberPlate: 100%\t(left_x: 303 top_y: 306 width: 122 height: 27)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/95.jpg: Predicted in 23.132000 milli-seconds.\nNumberPlate: 100%\t(left_x: 510 top_y: 339 width: 129 height: 83)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/96.jpg: Predicted in 23.174000 milli-seconds.\nNumberPlate: 99%\t(left_x: 291 top_y: 309 width: 114 height: 26)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/97.jpg: Predicted in 22.950000 
milli-seconds.\nNumberPlate: 100%\t(left_x: 248 top_y: 270 width: 90 height: 23)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/98.jpg: Predicted in 23.154000 milli-seconds.\nNumberPlate: 85%\t(left_x: 48 top_y: 353 width: 62 height: 71)\nEnter Image Path: Detection layer: 82 - type = 28 \n Detection layer: 94 - type = 28 \n Detection layer: 106 - type = 28 \ndata/img2/99.jpg: Predicted in 23.013000 milli-seconds.\nNumberPlate: 99%\t(left_x: 293 top_y: 383 width: 138 height: 30)\nEnter Image Path: " ], [ "import pandas as pd\nimport json\nwith open('result_vehicle_plates.json') as file:\n data=json.load(file)", "_____no_output_____" ] ], [ [ "#### 4. Coordinates DataFrame", "_____no_output_____" ] ], [ [ "dataset = pd.DataFrame(data)\ndataset", "_____no_output_____" ] ], [ [ "#### 5. Precision, Recall, F1-Score, Avg, map", "_____no_output_____" ] ], [ [ "ITERATION = \"2500\"\n!./darknet detector map data/obj.data yolo-obj.cfg yolo-obj_final_vehicle.weights -points 0 -iou_thresh 0.1 -thresh 0.2 > yolo-test_{ITERATION}.log\n!./darknet detector map data/obj.data yolo-obj.cfg yolo-obj_final_vehicle.weights -points 0 -iou_thresh 0.1 -thresh 0.15 >> yolo-test_{ITERATION}.log\n!./darknet detector map data/obj.data yolo-obj.cfg yolo-obj_final_vehicle.weights -points 0 -iou_thresh 0.1 -thresh 0.1 >> yolo-test_{ITERATION}.log\n!./darknet detector map data/obj.data yolo-obj.cfg yolo-obj_final_vehicle.weights -points 0 -iou_thresh 0.1 -thresh 0.05 >> yolo-test_{ITERATION}.log\n!./darknet detector map data/obj.data yolo-obj.cfg yolo-obj_final_vehicle.weights -points 0 -iou_thresh 0.05 -thresh 0.2 >> yolo-test_{ITERATION}.log\n!./darknet detector map data/obj.data yolo-obj.cfg yolo-obj_final_vehicle.weights -points 0 -iou_thresh 0.1 -thresh 0.2 >> yolo-test_{ITERATION}.log\n!./darknet detector map data/obj.data yolo-obj.cfg yolo-obj_final_vehicle.weights -points 0 -iou_thresh 0.15 -thresh 0.2 >> yolo-test_{ITERATION}.log\n!./darknet detector map data/obj.data yolo-obj.cfg yolo-obj_final_vehicle.weights -points 0 -iou_thresh 0.2 -thresh 0.2 >> yolo-test_{ITERATION}.log\n!./darknet detector map data/obj.data yolo-obj.cfg yolo-obj_final_vehicle.weights -points 0 -iou_thresh 0.3 -thresh 0.2 >> yolo-test_{ITERATION}.log\n!./darknet detector map data/obj.data yolo-obj.cfg yolo-obj_final_vehicle.weights -points 0 -iou_thresh 0.5 -thresh 0.2 >> yolo-test_{ITERATION}.log\n\nfrom subprocess import Popen, PIPE\ncmd = '''grep 'conf_thresh\\|mAP@' yolo-test_{}.log'''.format(ITERATION) + ''' | awk -F' ' 'BEGIN{l=\"conf,pre,rec,f1-score,avg_iou,iou_thr,map\"}{if($1==\"for\"){if($5==\"precision\"){l=l \"\\\\n\" $4 $7 $10 $13} else{l=l \",\" $17}} else{l=l \",\" $4 \",\" $8}}END{print l}' '''\np=Popen(cmd, shell=True, stdout=PIPE)\np.wait()\nprint(p.communicate()[0].decode())", " CUDA-version: 10010 (10010), cuDNN: 7.6.5, CUDNN_HALF=1, GPU count: 1 \n OpenCV isn't used - data augmentation will be slow \n 0 : compute_capability = 750, cudnn_half = 1, GPU: Tesla T4 \n layer filters size/strd(dil) input output\n 0 conv 32 3 x 3/ 1 608 x 608 x 3 -> 608 x 608 x 32 0.639 BF\n 1 conv 64 3 x 3/ 2 608 x 608 x 32 -> 304 x 304 x 64 3.407 BF\n 2 conv 32 1 x 1/ 1 304 x 304 x 64 -> 304 x 304 x 32 0.379 BF\n 3 conv 64 3 x 3/ 1 304 x 304 x 32 -> 304 x 304 x 64 3.407 BF\n 4 Shortcut Layer: 1, wt = 0, wn = 0, outputs: 304 x 304 x 64 0.006 BF\n 5 conv 128 3 x 3/ 2 304 x 304 x 64 -> 152 x 152 x 128 3.407 BF\n 6 conv 64 1 x 1/ 1 152 x 152 x 128 -> 152 x 
152 x 64 0.379 BF\n 7 conv 128 3 x 3/ 1 152 x 152 x 64 -> 152 x 152 x 128 3.407 BF\n 8 Shortcut Layer: 5, wt = 0, wn = 0, outputs: 152 x 152 x 128 0.003 BF\n 9 conv 64 1 x 1/ 1 152 x 152 x 128 -> 152 x 152 x 64 0.379 BF\n 10 conv 128 3 x 3/ 1 152 x 152 x 64 -> 152 x 152 x 128 3.407 BF\n 11 Shortcut Layer: 8, wt = 0, wn = 0, outputs: 152 x 152 x 128 0.003 BF\n 12 conv 256 3 x 3/ 2 152 x 152 x 128 -> 76 x 76 x 256 3.407 BF\n 13 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 14 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 15 Shortcut Layer: 12, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 16 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 17 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 18 Shortcut Layer: 15, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 19 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 20 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 21 Shortcut Layer: 18, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 22 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 23 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 24 Shortcut Layer: 21, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 25 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 26 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 27 Shortcut Layer: 24, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 28 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 29 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 30 Shortcut Layer: 27, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 31 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 32 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 33 Shortcut Layer: 30, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 34 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 35 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 36 Shortcut Layer: 33, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 37 conv 512 3 x 3/ 2 76 x 76 x 256 -> 38 x 38 x 512 3.407 BF\n 38 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 39 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 40 Shortcut Layer: 37, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 41 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 42 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 43 Shortcut Layer: 40, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 44 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 45 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 46 Shortcut Layer: 43, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 47 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 48 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 49 Shortcut Layer: 46, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 50 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 51 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 52 Shortcut Layer: 49, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 53 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 54 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 55 Shortcut Layer: 52, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 56 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 57 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 58 Shortcut Layer: 55, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 59 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 60 conv 512 3 x 
3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 61 Shortcut Layer: 58, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 62 conv 1024 3 x 3/ 2 38 x 38 x 512 -> 19 x 19 x1024 3.407 BF\n 63 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 64 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 65 Shortcut Layer: 62, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 66 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 67 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 68 Shortcut Layer: 65, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 69 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 70 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 71 Shortcut Layer: 68, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 72 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 73 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 74 Shortcut Layer: 71, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 75 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 76 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 77 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 78 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 79 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 80 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 81 conv 18 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 18 0.013 BF\n 82 yolo\n[yolo] params: iou loss: mse (2), iou_norm: 0.75, obj_norm: 1.00, cls_norm: 1.00, delta_norm: 1.00, scale_x_y: 1.00\n 83 route 79 \t\t -> 19 x 19 x 512 \n 84 conv 256 1 x 1/ 1 19 x 19 x 512 -> 19 x 19 x 256 0.095 BF\n 85 upsample 2x 19 x 19 x 256 -> 38 x 38 x 256\n 86 route 85 61 \t -> 38 x 38 x 768 \n 87 conv 256 1 x 1/ 1 38 x 38 x 768 -> 38 x 38 x 256 0.568 BF\n 88 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 89 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 90 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 91 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 92 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 93 conv 18 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 18 0.027 BF\n 94 yolo\n[yolo] params: iou loss: mse (2), iou_norm: 0.75, obj_norm: 1.00, cls_norm: 1.00, delta_norm: 1.00, scale_x_y: 1.00\n 95 route 91 \t\t -> 38 x 38 x 256 \n 96 conv 128 1 x 1/ 1 38 x 38 x 256 -> 38 x 38 x 128 0.095 BF\n 97 upsample 2x 38 x 38 x 128 -> 76 x 76 x 128\n 98 route 97 36 \t -> 76 x 76 x 384 \n 99 conv 128 1 x 1/ 1 76 x 76 x 384 -> 76 x 76 x 128 0.568 BF\n 100 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 101 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 102 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 103 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 104 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 105 conv 18 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 18 0.053 BF\n 106 yolo\n[yolo] params: iou loss: mse (2), iou_norm: 0.75, obj_norm: 1.00, cls_norm: 1.00, delta_norm: 1.00, scale_x_y: 1.00\nTotal BFLOPS 139.496 \navg_outputs = 1103769 \n Allocate additional workspace_size = 52.43 MB \nLoading weights from yolo-obj_final_vehicle.weights...Done! 
Loaded 107 layers from weights-file \n144Total Detection Time: 6 Seconds\n CUDA-version: 10010 (10010), cuDNN: 7.6.5, CUDNN_HALF=1, GPU count: 1 \n OpenCV isn't used - data augmentation will be slow \n 0 : compute_capability = 750, cudnn_half = 1, GPU: Tesla T4 \n layer filters size/strd(dil) input output\n 0 conv 32 3 x 3/ 1 608 x 608 x 3 -> 608 x 608 x 32 0.639 BF\n 1 conv 64 3 x 3/ 2 608 x 608 x 32 -> 304 x 304 x 64 3.407 BF\n 2 conv 32 1 x 1/ 1 304 x 304 x 64 -> 304 x 304 x 32 0.379 BF\n 3 conv 64 3 x 3/ 1 304 x 304 x 32 -> 304 x 304 x 64 3.407 BF\n 4 Shortcut Layer: 1, wt = 0, wn = 0, outputs: 304 x 304 x 64 0.006 BF\n 5 conv 128 3 x 3/ 2 304 x 304 x 64 -> 152 x 152 x 128 3.407 BF\n 6 conv 64 1 x 1/ 1 152 x 152 x 128 -> 152 x 152 x 64 0.379 BF\n 7 conv 128 3 x 3/ 1 152 x 152 x 64 -> 152 x 152 x 128 3.407 BF\n 8 Shortcut Layer: 5, wt = 0, wn = 0, outputs: 152 x 152 x 128 0.003 BF\n 9 conv 64 1 x 1/ 1 152 x 152 x 128 -> 152 x 152 x 64 0.379 BF\n 10 conv 128 3 x 3/ 1 152 x 152 x 64 -> 152 x 152 x 128 3.407 BF\n 11 Shortcut Layer: 8, wt = 0, wn = 0, outputs: 152 x 152 x 128 0.003 BF\n 12 conv 256 3 x 3/ 2 152 x 152 x 128 -> 76 x 76 x 256 3.407 BF\n 13 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 14 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 15 Shortcut Layer: 12, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 16 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 17 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 18 Shortcut Layer: 15, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 19 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 20 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 21 Shortcut Layer: 18, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 22 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 23 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 24 Shortcut Layer: 21, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 25 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 26 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 27 Shortcut Layer: 24, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 28 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 29 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 30 Shortcut Layer: 27, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 31 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 32 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 33 Shortcut Layer: 30, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 34 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 35 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 36 Shortcut Layer: 33, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 37 conv 512 3 x 3/ 2 76 x 76 x 256 -> 38 x 38 x 512 3.407 BF\n 38 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 39 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 40 Shortcut Layer: 37, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 41 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 42 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 43 Shortcut Layer: 40, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 44 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 45 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 46 Shortcut Layer: 43, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 47 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 48 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 49 
Shortcut Layer: 46, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 50 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 51 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 52 Shortcut Layer: 49, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 53 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 54 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 55 Shortcut Layer: 52, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 56 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 57 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 58 Shortcut Layer: 55, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 59 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 60 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 61 Shortcut Layer: 58, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 62 conv 1024 3 x 3/ 2 38 x 38 x 512 -> 19 x 19 x1024 3.407 BF\n 63 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 64 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 65 Shortcut Layer: 62, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 66 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 67 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 68 Shortcut Layer: 65, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 69 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 70 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 71 Shortcut Layer: 68, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 72 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 73 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 74 Shortcut Layer: 71, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 75 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 76 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 77 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 78 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 79 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 80 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 81 conv 18 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 18 0.013 BF\n 82 yolo\n[yolo] params: iou loss: mse (2), iou_norm: 0.75, obj_norm: 1.00, cls_norm: 1.00, delta_norm: 1.00, scale_x_y: 1.00\n 83 route 79 \t\t -> 19 x 19 x 512 \n 84 conv 256 1 x 1/ 1 19 x 19 x 512 -> 19 x 19 x 256 0.095 BF\n 85 upsample 2x 19 x 19 x 256 -> 38 x 38 x 256\n 86 route 85 61 \t -> 38 x 38 x 768 \n 87 conv 256 1 x 1/ 1 38 x 38 x 768 -> 38 x 38 x 256 0.568 BF\n 88 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 89 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 90 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 91 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 92 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 93 conv 18 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 18 0.027 BF\n 94 yolo\n[yolo] params: iou loss: mse (2), iou_norm: 0.75, obj_norm: 1.00, cls_norm: 1.00, delta_norm: 1.00, scale_x_y: 1.00\n 95 route 91 \t\t -> 38 x 38 x 256 \n 96 conv 128 1 x 1/ 1 38 x 38 x 256 -> 38 x 38 x 128 0.095 BF\n 97 upsample 2x 38 x 38 x 128 -> 76 x 76 x 128\n 98 route 97 36 \t -> 76 x 76 x 384 \n 99 conv 128 1 x 1/ 1 76 x 76 x 384 -> 76 x 76 x 128 0.568 BF\n 100 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 101 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 102 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 103 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 
BF\n 104 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 105 conv 18 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 18 0.053 BF\n 106 yolo\n[yolo] params: iou loss: mse (2), iou_norm: 0.75, obj_norm: 1.00, cls_norm: 1.00, delta_norm: 1.00, scale_x_y: 1.00\nTotal BFLOPS 139.496 \navg_outputs = 1103769 \n Allocate additional workspace_size = 52.43 MB \nLoading weights from yolo-obj_final_vehicle.weights...Done! Loaded 107 layers from weights-file \n144Total Detection Time: 6 Seconds\n CUDA-version: 10010 (10010), cuDNN: 7.6.5, CUDNN_HALF=1, GPU count: 1 \n OpenCV isn't used - data augmentation will be slow \n 0 : compute_capability = 750, cudnn_half = 1, GPU: Tesla T4 \n layer filters size/strd(dil) input output\n 0 conv 32 3 x 3/ 1 608 x 608 x 3 -> 608 x 608 x 32 0.639 BF\n 1 conv 64 3 x 3/ 2 608 x 608 x 32 -> 304 x 304 x 64 3.407 BF\n 2 conv 32 1 x 1/ 1 304 x 304 x 64 -> 304 x 304 x 32 0.379 BF\n 3 conv 64 3 x 3/ 1 304 x 304 x 32 -> 304 x 304 x 64 3.407 BF\n 4 Shortcut Layer: 1, wt = 0, wn = 0, outputs: 304 x 304 x 64 0.006 BF\n 5 conv 128 3 x 3/ 2 304 x 304 x 64 -> 152 x 152 x 128 3.407 BF\n 6 conv 64 1 x 1/ 1 152 x 152 x 128 -> 152 x 152 x 64 0.379 BF\n 7 conv 128 3 x 3/ 1 152 x 152 x 64 -> 152 x 152 x 128 3.407 BF\n 8 Shortcut Layer: 5, wt = 0, wn = 0, outputs: 152 x 152 x 128 0.003 BF\n 9 conv 64 1 x 1/ 1 152 x 152 x 128 -> 152 x 152 x 64 0.379 BF\n 10 conv 128 3 x 3/ 1 152 x 152 x 64 -> 152 x 152 x 128 3.407 BF\n 11 Shortcut Layer: 8, wt = 0, wn = 0, outputs: 152 x 152 x 128 0.003 BF\n 12 conv 256 3 x 3/ 2 152 x 152 x 128 -> 76 x 76 x 256 3.407 BF\n 13 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 14 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 15 Shortcut Layer: 12, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 16 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 17 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 18 Shortcut Layer: 15, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 19 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 20 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 21 Shortcut Layer: 18, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 22 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 23 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 24 Shortcut Layer: 21, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 25 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 26 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 27 Shortcut Layer: 24, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 28 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 29 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 30 Shortcut Layer: 27, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 31 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 32 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 33 Shortcut Layer: 30, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 34 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 35 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 36 Shortcut Layer: 33, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 37 conv 512 3 x 3/ 2 76 x 76 x 256 -> 38 x 38 x 512 3.407 BF\n 38 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 39 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 40 Shortcut Layer: 37, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 41 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 42 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 
3.407 BF\n 43 Shortcut Layer: 40, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 44 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 45 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 46 Shortcut Layer: 43, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 47 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 48 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 49 Shortcut Layer: 46, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 50 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 51 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 52 Shortcut Layer: 49, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 53 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 54 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 55 Shortcut Layer: 52, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 56 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 57 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 58 Shortcut Layer: 55, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 59 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 60 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 61 Shortcut Layer: 58, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 62 conv 1024 3 x 3/ 2 38 x 38 x 512 -> 19 x 19 x1024 3.407 BF\n 63 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 64 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 65 Shortcut Layer: 62, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 66 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 67 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 68 Shortcut Layer: 65, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 69 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 70 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 71 Shortcut Layer: 68, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 72 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 73 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 74 Shortcut Layer: 71, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 75 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 76 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 77 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 78 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 79 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 80 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 81 conv 18 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 18 0.013 BF\n 82 yolo\n[yolo] params: iou loss: mse (2), iou_norm: 0.75, obj_norm: 1.00, cls_norm: 1.00, delta_norm: 1.00, scale_x_y: 1.00\n 83 route 79 \t\t -> 19 x 19 x 512 \n 84 conv 256 1 x 1/ 1 19 x 19 x 512 -> 19 x 19 x 256 0.095 BF\n 85 upsample 2x 19 x 19 x 256 -> 38 x 38 x 256\n 86 route 85 61 \t -> 38 x 38 x 768 \n 87 conv 256 1 x 1/ 1 38 x 38 x 768 -> 38 x 38 x 256 0.568 BF\n 88 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 89 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 90 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 91 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 92 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 93 conv 18 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 18 0.027 BF\n 94 yolo\n[yolo] params: iou loss: mse (2), iou_norm: 0.75, obj_norm: 1.00, cls_norm: 1.00, delta_norm: 1.00, scale_x_y: 1.00\n 95 route 91 \t\t -> 38 x 38 x 256 \n 96 conv 128 1 x 1/ 1 38 x 38 x 256 -> 38 x 38 x 128 
0.095 BF\n 97 upsample 2x 38 x 38 x 128 -> 76 x 76 x 128\n 98 route 97 36 \t -> 76 x 76 x 384 \n 99 conv 128 1 x 1/ 1 76 x 76 x 384 -> 76 x 76 x 128 0.568 BF\n 100 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 101 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 102 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 103 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 104 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 105 conv 18 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 18 0.053 BF\n 106 yolo\n[yolo] params: iou loss: mse (2), iou_norm: 0.75, obj_norm: 1.00, cls_norm: 1.00, delta_norm: 1.00, scale_x_y: 1.00\nTotal BFLOPS 139.496 \navg_outputs = 1103769 \n Allocate additional workspace_size = 52.43 MB \nLoading weights from yolo-obj_final_vehicle.weights...Done! Loaded 107 layers from weights-file \n144Total Detection Time: 6 Seconds\n CUDA-version: 10010 (10010), cuDNN: 7.6.5, CUDNN_HALF=1, GPU count: 1 \n OpenCV isn't used - data augmentation will be slow \n 0 : compute_capability = 750, cudnn_half = 1, GPU: Tesla T4 \n layer filters size/strd(dil) input output\n 0 conv 32 3 x 3/ 1 608 x 608 x 3 -> 608 x 608 x 32 0.639 BF\n 1 conv 64 3 x 3/ 2 608 x 608 x 32 -> 304 x 304 x 64 3.407 BF\n 2 conv 32 1 x 1/ 1 304 x 304 x 64 -> 304 x 304 x 32 0.379 BF\n 3 conv 64 3 x 3/ 1 304 x 304 x 32 -> 304 x 304 x 64 3.407 BF\n 4 Shortcut Layer: 1, wt = 0, wn = 0, outputs: 304 x 304 x 64 0.006 BF\n 5 conv 128 3 x 3/ 2 304 x 304 x 64 -> 152 x 152 x 128 3.407 BF\n 6 conv 64 1 x 1/ 1 152 x 152 x 128 -> 152 x 152 x 64 0.379 BF\n 7 conv 128 3 x 3/ 1 152 x 152 x 64 -> 152 x 152 x 128 3.407 BF\n 8 Shortcut Layer: 5, wt = 0, wn = 0, outputs: 152 x 152 x 128 0.003 BF\n 9 conv 64 1 x 1/ 1 152 x 152 x 128 -> 152 x 152 x 64 0.379 BF\n 10 conv 128 3 x 3/ 1 152 x 152 x 64 -> 152 x 152 x 128 3.407 BF\n 11 Shortcut Layer: 8, wt = 0, wn = 0, outputs: 152 x 152 x 128 0.003 BF\n 12 conv 256 3 x 3/ 2 152 x 152 x 128 -> 76 x 76 x 256 3.407 BF\n 13 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 14 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 15 Shortcut Layer: 12, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 16 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 17 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 18 Shortcut Layer: 15, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 19 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 20 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 21 Shortcut Layer: 18, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 22 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 23 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 24 Shortcut Layer: 21, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 25 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 26 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 27 Shortcut Layer: 24, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 28 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 29 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 30 Shortcut Layer: 27, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 31 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 32 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 33 Shortcut Layer: 30, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 34 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 35 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 36 Shortcut Layer: 33, wt = 0, wn = 0, 
outputs: 76 x 76 x 256 0.001 BF\n 37 conv 512 3 x 3/ 2 76 x 76 x 256 -> 38 x 38 x 512 3.407 BF\n 38 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 39 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 40 Shortcut Layer: 37, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 41 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 42 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 43 Shortcut Layer: 40, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 44 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 45 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 46 Shortcut Layer: 43, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 47 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 48 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 49 Shortcut Layer: 46, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 50 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 51 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 52 Shortcut Layer: 49, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 53 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 54 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 55 Shortcut Layer: 52, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 56 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 57 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 58 Shortcut Layer: 55, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 59 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 60 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 61 Shortcut Layer: 58, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 62 conv 1024 3 x 3/ 2 38 x 38 x 512 -> 19 x 19 x1024 3.407 BF\n 63 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 64 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 65 Shortcut Layer: 62, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 66 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 67 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 68 Shortcut Layer: 65, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 69 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 70 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 71 Shortcut Layer: 68, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 72 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 73 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 74 Shortcut Layer: 71, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 75 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 76 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 77 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 78 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 79 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 80 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 81 conv 18 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 18 0.013 BF\n 82 yolo\n[yolo] params: iou loss: mse (2), iou_norm: 0.75, obj_norm: 1.00, cls_norm: 1.00, delta_norm: 1.00, scale_x_y: 1.00\n 83 route 79 \t\t -> 19 x 19 x 512 \n 84 conv 256 1 x 1/ 1 19 x 19 x 512 -> 19 x 19 x 256 0.095 BF\n 85 upsample 2x 19 x 19 x 256 -> 38 x 38 x 256\n 86 route 85 61 \t -> 38 x 38 x 768 \n 87 conv 256 1 x 1/ 1 38 x 38 x 768 -> 38 x 38 x 256 0.568 BF\n 88 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 89 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 90 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 
BF\n 91 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 92 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 93 conv 18 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 18 0.027 BF\n 94 yolo\n[yolo] params: iou loss: mse (2), iou_norm: 0.75, obj_norm: 1.00, cls_norm: 1.00, delta_norm: 1.00, scale_x_y: 1.00\n 95 route 91 \t\t -> 38 x 38 x 256 \n 96 conv 128 1 x 1/ 1 38 x 38 x 256 -> 38 x 38 x 128 0.095 BF\n 97 upsample 2x 38 x 38 x 128 -> 76 x 76 x 128\n 98 route 97 36 \t -> 76 x 76 x 384 \n 99 conv 128 1 x 1/ 1 76 x 76 x 384 -> 76 x 76 x 128 0.568 BF\n 100 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 101 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 102 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 103 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 104 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 105 conv 18 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 18 0.053 BF\n 106 yolo\n[yolo] params: iou loss: mse (2), iou_norm: 0.75, obj_norm: 1.00, cls_norm: 1.00, delta_norm: 1.00, scale_x_y: 1.00\nTotal BFLOPS 139.496 \navg_outputs = 1103769 \n Allocate additional workspace_size = 52.43 MB \nLoading weights from yolo-obj_final_vehicle.weights...Done! Loaded 107 layers from weights-file \n144Total Detection Time: 6 Seconds\n CUDA-version: 10010 (10010), cuDNN: 7.6.5, CUDNN_HALF=1, GPU count: 1 \n OpenCV isn't used - data augmentation will be slow \n 0 : compute_capability = 750, cudnn_half = 1, GPU: Tesla T4 \n layer filters size/strd(dil) input output\n 0 conv 32 3 x 3/ 1 608 x 608 x 3 -> 608 x 608 x 32 0.639 BF\n 1 conv 64 3 x 3/ 2 608 x 608 x 32 -> 304 x 304 x 64 3.407 BF\n 2 conv 32 1 x 1/ 1 304 x 304 x 64 -> 304 x 304 x 32 0.379 BF\n 3 conv 64 3 x 3/ 1 304 x 304 x 32 -> 304 x 304 x 64 3.407 BF\n 4 Shortcut Layer: 1, wt = 0, wn = 0, outputs: 304 x 304 x 64 0.006 BF\n 5 conv 128 3 x 3/ 2 304 x 304 x 64 -> 152 x 152 x 128 3.407 BF\n 6 conv 64 1 x 1/ 1 152 x 152 x 128 -> 152 x 152 x 64 0.379 BF\n 7 conv 128 3 x 3/ 1 152 x 152 x 64 -> 152 x 152 x 128 3.407 BF\n 8 Shortcut Layer: 5, wt = 0, wn = 0, outputs: 152 x 152 x 128 0.003 BF\n 9 conv 64 1 x 1/ 1 152 x 152 x 128 -> 152 x 152 x 64 0.379 BF\n 10 conv 128 3 x 3/ 1 152 x 152 x 64 -> 152 x 152 x 128 3.407 BF\n 11 Shortcut Layer: 8, wt = 0, wn = 0, outputs: 152 x 152 x 128 0.003 BF\n 12 conv 256 3 x 3/ 2 152 x 152 x 128 -> 76 x 76 x 256 3.407 BF\n 13 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 14 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 15 Shortcut Layer: 12, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 16 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 17 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 18 Shortcut Layer: 15, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 19 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 20 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 21 Shortcut Layer: 18, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 22 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 23 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 24 Shortcut Layer: 21, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 25 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 26 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 27 Shortcut Layer: 24, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 28 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 29 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 30 Shortcut Layer: 27, wt = 0, 
wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 31 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 32 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 33 Shortcut Layer: 30, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 34 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 35 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 36 Shortcut Layer: 33, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 37 conv 512 3 x 3/ 2 76 x 76 x 256 -> 38 x 38 x 512 3.407 BF\n 38 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 39 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 40 Shortcut Layer: 37, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 41 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 42 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 43 Shortcut Layer: 40, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 44 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 45 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 46 Shortcut Layer: 43, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 47 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 48 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 49 Shortcut Layer: 46, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 50 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 51 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 52 Shortcut Layer: 49, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 53 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 54 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 55 Shortcut Layer: 52, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 56 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 57 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 58 Shortcut Layer: 55, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 59 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 60 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 61 Shortcut Layer: 58, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 62 conv 1024 3 x 3/ 2 38 x 38 x 512 -> 19 x 19 x1024 3.407 BF\n 63 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 64 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 65 Shortcut Layer: 62, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 66 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 67 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 68 Shortcut Layer: 65, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 69 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 70 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 71 Shortcut Layer: 68, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 72 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 73 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 74 Shortcut Layer: 71, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 75 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 76 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 77 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 78 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 79 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 80 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 81 conv 18 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 18 0.013 BF\n 82 yolo\n[yolo] params: iou loss: mse (2), iou_norm: 0.75, obj_norm: 1.00, cls_norm: 1.00, delta_norm: 1.00, scale_x_y: 1.00\n 83 route 79 \t\t -> 19 x 
19 x 512 \n 84 conv 256 1 x 1/ 1 19 x 19 x 512 -> 19 x 19 x 256 0.095 BF\n 85 upsample 2x 19 x 19 x 256 -> 38 x 38 x 256\n 86 route 85 61 \t -> 38 x 38 x 768 \n 87 conv 256 1 x 1/ 1 38 x 38 x 768 -> 38 x 38 x 256 0.568 BF\n 88 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 89 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 90 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 91 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 92 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 93 conv 18 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 18 0.027 BF\n 94 yolo\n[yolo] params: iou loss: mse (2), iou_norm: 0.75, obj_norm: 1.00, cls_norm: 1.00, delta_norm: 1.00, scale_x_y: 1.00\n 95 route 91 \t\t -> 38 x 38 x 256 \n 96 conv 128 1 x 1/ 1 38 x 38 x 256 -> 38 x 38 x 128 0.095 BF\n 97 upsample 2x 38 x 38 x 128 -> 76 x 76 x 128\n 98 route 97 36 \t -> 76 x 76 x 384 \n 99 conv 128 1 x 1/ 1 76 x 76 x 384 -> 76 x 76 x 128 0.568 BF\n 100 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 101 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 102 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 103 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 104 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 105 conv 18 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 18 0.053 BF\n 106 yolo\n[yolo] params: iou loss: mse (2), iou_norm: 0.75, obj_norm: 1.00, cls_norm: 1.00, delta_norm: 1.00, scale_x_y: 1.00\nTotal BFLOPS 139.496 \navg_outputs = 1103769 \n Allocate additional workspace_size = 52.43 MB \nLoading weights from yolo-obj_final_vehicle.weights...Done! Loaded 107 layers from weights-file \n144Total Detection Time: 7 Seconds\n CUDA-version: 10010 (10010), cuDNN: 7.6.5, CUDNN_HALF=1, GPU count: 1 \n OpenCV isn't used - data augmentation will be slow \n 0 : compute_capability = 750, cudnn_half = 1, GPU: Tesla T4 \n layer filters size/strd(dil) input output\n 0 conv 32 3 x 3/ 1 608 x 608 x 3 -> 608 x 608 x 32 0.639 BF\n 1 conv 64 3 x 3/ 2 608 x 608 x 32 -> 304 x 304 x 64 3.407 BF\n 2 conv 32 1 x 1/ 1 304 x 304 x 64 -> 304 x 304 x 32 0.379 BF\n 3 conv 64 3 x 3/ 1 304 x 304 x 32 -> 304 x 304 x 64 3.407 BF\n 4 Shortcut Layer: 1, wt = 0, wn = 0, outputs: 304 x 304 x 64 0.006 BF\n 5 conv 128 3 x 3/ 2 304 x 304 x 64 -> 152 x 152 x 128 3.407 BF\n 6 conv 64 1 x 1/ 1 152 x 152 x 128 -> 152 x 152 x 64 0.379 BF\n 7 conv 128 3 x 3/ 1 152 x 152 x 64 -> 152 x 152 x 128 3.407 BF\n 8 Shortcut Layer: 5, wt = 0, wn = 0, outputs: 152 x 152 x 128 0.003 BF\n 9 conv 64 1 x 1/ 1 152 x 152 x 128 -> 152 x 152 x 64 0.379 BF\n 10 conv 128 3 x 3/ 1 152 x 152 x 64 -> 152 x 152 x 128 3.407 BF\n 11 Shortcut Layer: 8, wt = 0, wn = 0, outputs: 152 x 152 x 128 0.003 BF\n 12 conv 256 3 x 3/ 2 152 x 152 x 128 -> 76 x 76 x 256 3.407 BF\n 13 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 14 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 15 Shortcut Layer: 12, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 16 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 17 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 18 Shortcut Layer: 15, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 19 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 20 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 21 Shortcut Layer: 18, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 22 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 23 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 24 Shortcut Layer: 21, 
wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 25 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 26 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 27 Shortcut Layer: 24, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 28 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 29 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 30 Shortcut Layer: 27, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 31 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 32 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 33 Shortcut Layer: 30, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 34 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 35 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 36 Shortcut Layer: 33, wt = 0, wn = 0, outputs: 76 x 76 x 256 0.001 BF\n 37 conv 512 3 x 3/ 2 76 x 76 x 256 -> 38 x 38 x 512 3.407 BF\n 38 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 39 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 40 Shortcut Layer: 37, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 41 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 42 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 43 Shortcut Layer: 40, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 44 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 45 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 46 Shortcut Layer: 43, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 47 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 48 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 49 Shortcut Layer: 46, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 50 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 51 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 52 Shortcut Layer: 49, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 53 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 54 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 55 Shortcut Layer: 52, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 56 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 57 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 58 Shortcut Layer: 55, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 59 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 60 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 61 Shortcut Layer: 58, wt = 0, wn = 0, outputs: 38 x 38 x 512 0.001 BF\n 62 conv 1024 3 x 3/ 2 38 x 38 x 512 -> 19 x 19 x1024 3.407 BF\n 63 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 64 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 65 Shortcut Layer: 62, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 66 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 67 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 68 Shortcut Layer: 65, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 69 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 70 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 71 Shortcut Layer: 68, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 72 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 73 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 74 Shortcut Layer: 71, wt = 0, wn = 0, outputs: 19 x 19 x1024 0.000 BF\n 75 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 76 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 77 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 
78 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 79 conv 512 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 512 0.379 BF\n 80 conv 1024 3 x 3/ 1 19 x 19 x 512 -> 19 x 19 x1024 3.407 BF\n 81 conv 18 1 x 1/ 1 19 x 19 x1024 -> 19 x 19 x 18 0.013 BF\n 82 yolo\n[yolo] params: iou loss: mse (2), iou_norm: 0.75, obj_norm: 1.00, cls_norm: 1.00, delta_norm: 1.00, scale_x_y: 1.00\n 83 route 79 \t\t -> 19 x 19 x 512 \n 84 conv 256 1 x 1/ 1 19 x 19 x 512 -> 19 x 19 x 256 0.095 BF\n 85 upsample 2x 19 x 19 x 256 -> 38 x 38 x 256\n 86 route 85 61 \t -> 38 x 38 x 768 \n 87 conv 256 1 x 1/ 1 38 x 38 x 768 -> 38 x 38 x 256 0.568 BF\n 88 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 89 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 90 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 91 conv 256 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 256 0.379 BF\n 92 conv 512 3 x 3/ 1 38 x 38 x 256 -> 38 x 38 x 512 3.407 BF\n 93 conv 18 1 x 1/ 1 38 x 38 x 512 -> 38 x 38 x 18 0.027 BF\n 94 yolo\n[yolo] params: iou loss: mse (2), iou_norm: 0.75, obj_norm: 1.00, cls_norm: 1.00, delta_norm: 1.00, scale_x_y: 1.00\n 95 route 91 \t\t -> 38 x 38 x 256 \n 96 conv 128 1 x 1/ 1 38 x 38 x 256 -> 38 x 38 x 128 0.095 BF\n 97 upsample 2x 38 x 38 x 128 -> 76 x 76 x 128\n 98 route 97 36 \t -> 76 x 76 x 384 \n 99 conv 128 1 x 1/ 1 76 x 76 x 384 -> 76 x 76 x 128 0.568 BF\n 100 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 101 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 102 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 103 conv 128 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 128 0.379 BF\n 104 conv 256 3 x 3/ 1 76 x 76 x 128 -> 76 x 76 x 256 3.407 BF\n 105 conv 18 1 x 1/ 1 76 x 76 x 256 -> 76 x 76 x 18 0.053 BF\n 106 yolo\n[yolo] params: iou loss: mse (2), iou_norm: 0.75, obj_norm: 1.00, cls_norm: 1.00, delta_norm: 1.00, scale_x_y: 1.00\nTotal BFLOPS 139.496 \navg_outputs = 1103769 \n Allocate additional workspace_size = 52.43 MB \nLoading weights from yolo-obj_final_vehicle.weights...Done! 
Loaded 107 layers from weights-file \n144Total Detection Time: 6 Seconds\n[... identical CUDA banner, 107-layer network summary (Total BFLOPS 139.496) and weight-loading output repeated for four further detection runs; detection times: 6, 7, 6 and 6 seconds ...]\nconf,pre,rec,f1-score,avg_iou,iou_thr,map\n0.20,0.97,0.99,0.98,78.99,([email protected]),99.86\n0.15,0.96,0.99,0.97,78.45,([email protected]),99.86\n0.10,0.96,0.99,0.97,78.45,([email protected]),99.86\n0.05,0.96,0.99,0.98,78.35,([email protected]),99.86\n0.20,0.97,0.99,0.98,78.99,([email protected]),99.86\n0.20,0.97,0.99,0.98,78.99,([email protected]),99.86\n0.20,0.97,0.99,0.98,78.99,([email protected]),99.86\n0.20,0.97,0.99,0.98,78.99,([email protected]),99.86\n0.20,0.97,0.99,0.98,78.99,([email protected]),99.86\n0.20,0.96,0.98,0.97,78.66,([email protected]),99.17\n\n"
 ], [ "%cd ..", "/content/drive/My Drive\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
c52ed96ee051b6d1b3c53ef33fcee493549a5e75
387,236
ipynb
Jupyter Notebook
model_final.ipynb
GustavoBMG/earthquake-prediction
a3278bb22385c78013269356990a60d60aba50f1
[ "MIT" ]
1
2021-11-09T20:16:31.000Z
2021-11-09T20:16:31.000Z
model_final.ipynb
GustavoBMG/earthquake-prediction
a3278bb22385c78013269356990a60d60aba50f1
[ "MIT" ]
null
null
null
model_final.ipynb
GustavoBMG/earthquake-prediction
a3278bb22385c78013269356990a60d60aba50f1
[ "MIT" ]
null
null
null
96.615768
80,928
0.589852
[ [ [ "import pandas as pd\nimport numpy as np\n\nimport zucaml.zucaml as ml\n\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\npd.set_option('display.max_columns', None)", "_____no_output_____" ] ], [ [ "#### gold", "_____no_output_____" ] ], [ [ "df_gold = ml.get_csv('data/gold/', 'gold', [])\n\ndf_gold = df_gold.sort_values(['date', 'x', 'y', 'z'], ascending = [True, True, True, True])\n\nresults_grid = pd.DataFrame({}, index = [])\n\nml.print_memory(df_gold)\ndf_gold[:5]", "Memory usage: 11.15 MB\n106,265 x 24\n" ] ], [ [ "#### features", "_____no_output_____" ] ], [ [ "target = 'target'\ntime_ref = 'date'\npid = 'zone_frame'\n\nall_features = {\n\n 'x': {\n 'class': 'location',\n 'type': 'categorical',\n 'subtype': 'onehot',\n 'level': 0,\n },\n\n 'y': {\n 'class': 'location',\n 'type': 'categorical',\n 'subtype': 'onehot',\n 'level': 0,\n },\n\n 'z': {\n 'class': 'location',\n 'type': 'categorical',\n 'subtype': 'onehot',\n 'level': 0,\n },\n \n 'energy|rolling.mean#30': {\n 'class': 'energy.ma',\n 'type': 'numeric',\n 'subtype': 'float',\n 'level': 0,\n },\n\n 'energy|rolling.mean#90': {\n 'class': 'energy.ma',\n 'type': 'numeric',\n 'subtype': 'float',\n 'level': 0,\n },\n\n 'energy|rolling.mean#180': {\n 'class': 'energy.ma',\n 'type': 'numeric',\n 'subtype': 'float',\n 'level': 0,\n },\n\n 'energy|rolling.mean#330': {\n 'class': 'energy.ma',\n 'type': 'numeric',\n 'subtype': 'float',\n 'level': 0,\n },\n\n 'energy|rolling.mean#360': {\n 'class': 'energy.ma',\n 'type': 'numeric',\n 'subtype': 'float',\n 'level': 0,\n },\n \n 'energy_neighbours|rolling.mean#30': {\n 'class': 'energy.ma',\n 'type': 'numeric',\n 'subtype': 'float',\n 'level': 0,\n },\n\n 'energy_neighbours|rolling.mean#90': {\n 'class': 'energy.ma',\n 'type': 'numeric',\n 'subtype': 'float',\n 'level': 0,\n },\n\n 'energy_neighbours|rolling.mean#180': {\n 'class': 'energy.ma',\n 'type': 'numeric',\n 'subtype': 'float',\n 'level': 0,\n },\n\n 'energy_neighbours|rolling.mean#330': {\n 'class': 'energy.ma',\n 'type': 'numeric',\n 'subtype': 'float',\n 'level': 0,\n },\n\n 'energy_neighbours|rolling.mean#360': {\n 'class': 'energy.ma',\n 'type': 'numeric',\n 'subtype': 'float',\n 'level': 0,\n },\n\n 'energy|rolling.mean#30||ratio||energy|rolling.mean#360': {\n 'class': 'energy.ratio',\n 'type': 'numeric',\n 'subtype': 'float',\n 'level': 0,\n },\n\n 'energy|rolling.mean#90||ratio||energy|rolling.mean#360': {\n 'class': 'energy.ratio',\n 'type': 'numeric',\n 'subtype': 'float',\n 'level': 0,\n },\n\n 'energy|rolling.mean#180||ratio||energy|rolling.mean#360': {\n 'class': 'energy.ratio',\n 'type': 'numeric',\n 'subtype': 'float',\n 'level': 0,\n },\n\n 'energy|rolling.mean#330||ratio||energy|rolling.mean#360': {\n 'class': 'energy.ratio',\n 'type': 'numeric',\n 'subtype': 'float',\n 'level': 0,\n },\n \n 'days.since.last': {\n 'class': 'info',\n 'type': 'numeric',\n 'subtype': 'int',\n 'level': 0,\n },\n\n}\n\ndiscarded_features = [feat for feat in df_gold if feat not in [feat2 for feat2 in all_features] + [target, time_ref, pid]]\nonehot_features = [feat for feat in all_features if all_features[feat]['type'] == 'categorical' and all_features[feat]['subtype'] == 'onehot']\n\nprint('Total features\\t\\t ' + str(len(all_features)))\nif len(discarded_features) > 0:\n print('Discarded features\\t ' + str(len(discarded_features)) + '\\t\\t' + str(discarded_features))\nprint('Numerical features\\t ' + str(sum([all_features[i]['type'] == 'numeric' for i in all_features])))\nprint('Categorical features\\t ' + 
str(sum([all_features[i]['type'] == 'categorical' for i in all_features])))\nif len(onehot_features) > 0:\n print('One-hot features\\t ' + str(len(onehot_features)) + '\\t\\t' + str(onehot_features))", "Total features\t\t 18\nDiscarded features\t 3\t\t['event', 'energy', 'energy_neighbours']\nNumerical features\t 15\nCategorical features\t 3\nOne-hot features\t 3\t\t['x', 'y', 'z']\n" ] ], [ [ "#### Problem", "_____no_output_____" ] ], [ [ "this_problem = ml.problems.BINARY\nmetrics = ['F0.5', 'precision', 'recall', 'roc_auc']", "_____no_output_____" ] ], [ [ "#### split train test", "_____no_output_____" ] ], [ [ "df_train, df_test = ml.split_by_time_ref(df_gold, 0.88, target, time_ref, this_problem, True)", " Total\tTotal perc\t Balance\t Events\t\n 95,181\t 90%\t 6.95%\t 6,612\t\n 11,084\t 10%\t 8.46%\t 938\t\n" ] ], [ [ "#### model", "_____no_output_____" ] ], [ [ "level_0_features = [feat for feat in all_features if all_features[feat]['level'] == 0]\nlevel_0_features_numeric = [feat for feat in level_0_features if feat not in onehot_features]\nlevel_0_features_onehot = [feat for feat in level_0_features if feat in onehot_features]\n\nlevel_0_features_energy_ma = [feat for feat in level_0_features if all_features[feat]['class'] == 'energy.ma']\nlevel_0_features_energy_ratio = [feat for feat in level_0_features if all_features[feat]['class'] == 'energy.ratio']\n\nnumber_categorical_onehot = df_train['x'].nunique() + df_train['y'].nunique() + df_train['z'].nunique()\n\nlevel_0_features_numeric_clip = [feat for feat in level_0_features_numeric if df_train[feat].abs().max() == np.inf]\nlevel_0_features_numeric_not_clip = [feat for feat in level_0_features_numeric if feat not in level_0_features_numeric_clip]\n\nlevel_0_features_location = [feat for feat in all_features if all_features[feat]['level'] == 0 and all_features[feat]['class'] == 'location']", "_____no_output_____" ], [ "%%time\n\n# ##########################\n# # linear models\n# ##########################\n\nlin_basic_config = {\n 'features': level_0_features,\n 'target': target,\n 'family': ml.lin(this_problem),\n 'algo': {\n 'penalty': 'l2',\n 'class_weight': 'balanced',\n },\n 'preprocess': {\n 'original': {\n 'features': level_0_features,\n 'transformer': ['filler', 'clipper', 'standard_scaler'],\n },\n },\n}\n\nlin_params = {\n 'algo:C' : [0.01, 0.1, 1.0],\n 'algo:solver': ['lbfgs', 'newton-cg'],\n 'preprocess:iforest': [None,\n {\n 'features': level_0_features,\n 'transformer': ['filler', 'clipper', 'iforest_score'],\n },\n ],\n 'preprocess:kmeans': [None,\n {\n 'features': [feat for feat in level_0_features if all_features[feat]['type'] == 'numeric' or all_features[feat]['subtype'] == 'bool'],\n 'transformer': ['filler', 'clipper', 'kmeans_distances'],\n },\n ],\n}\n\n# ##########################\n# # rft models\n# ##########################\n\nrft_basic_config = {\n 'features': level_0_features,\n 'target': target,\n 'family': ml.rft(this_problem),\n 'algo': {\n 'criterion': 'entropy',\n 'class_weight': 'balanced',\n },\n 'preprocess': {\n 'original': {\n 'features': level_0_features,\n 'transformer': ['filler', 'clipper'],\n },\n },\n}\n\nrft_params = {\n 'algo:max_depth': [6, 7, 8],\n 'algo:n_estimators': [25, 50, 75, 100, 150],\n 'preprocess:iforest': [None,\n {\n 'features': level_0_features,\n 'transformer': ['filler', 'clipper', 'iforest_score'],\n },\n ],\n 'preprocess:kmeans': [None,\n {\n 'features': [feat for feat in level_0_features if all_features[feat]['type'] == 'numeric' or all_features[feat]['subtype'] == 
'bool'],\n 'transformer': ['filler', 'clipper', 'kmeans_distances'],\n },\n ],\n}\n\n# ##########################\n# # xgb models\n# ##########################\n\nxgb_basic_config = {\n 'features': level_0_features,\n 'target': target,\n 'family': ml.xgb(this_problem),\n 'algo': {\n 'criterion': 'entropy',\n 'scale_pos_weight': 'balanced',\n },\n 'preprocess': {\n 'original': {\n 'features': level_0_features,\n 'transformer': ['filler', 'clipper'],\n },\n },\n}\n\nxgb_params = {\n 'algo:max_depth': [5, 6, 7],\n 'algo:n_estimators': [15, 25, 35],\n 'preprocess:iforest': [None,\n {\n 'features': level_0_features,\n 'transformer': ['filler', 'clipper', 'iforest_score'],\n },\n ],\n 'preprocess:kmeans': [None,\n {\n 'features': [feat for feat in level_0_features if all_features[feat]['type'] == 'numeric' or all_features[feat]['subtype'] == 'bool'],\n 'transformer': ['filler', 'clipper', 'kmeans_distances'],\n },\n ],\n}\n\n# ##########################\n# # search\n# ##########################\n\nbasic_configs_and_params = []\nbasic_configs_and_params.append((lin_basic_config, lin_params))\nbasic_configs_and_params.append((rft_basic_config, rft_params))\nbasic_configs_and_params.append((xgb_basic_config, xgb_params))\n\ngrid_board, best_model = ml.grid_search(\n train = df_train,\n target = target,\n time_ref = time_ref,\n problem = this_problem,\n metrics = metrics,\n cv_strategy = ml.cv_strategies.TIME,\n k_fold = 3,\n percentage_test = 0.1,\n basic_configs_and_params = basic_configs_and_params,\n)\n\ngrid_board.sort_values(metrics[0], ascending = False).style.format(ml.results_format)", "[19:43:01] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:43:02] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:43:03] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:43:04] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:43:05] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. 
Please open an issue if you find above cases.\n\n\n[19:44:47] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:44:49] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:44:50] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:44:53] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:44:54] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:44:55] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:44:56] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:44:57] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:44:58] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. 
Please open an issue if you find above cases.\n\n\n[19:45:00] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:02] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:03] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:06] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:07] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:09] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:12] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:13] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:14] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. 
Please open an issue if you find above cases.\n\n\n[19:45:15] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:16] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:17] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:19] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:20] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:22] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:24] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:26] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:28] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. 
Please open an issue if you find above cases.\n\n\n[19:45:30] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:32] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:33] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:34] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:35] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:36] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:38] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:40] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:41] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. 
Please open an issue if you find above cases.\n\n\n[19:45:44] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:45] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:47] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\n[19:45:50] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. Please open an issue if you find above cases.\n\n\nCPU times: user 51min 9s, sys: 2min 19s, total: 53min 28s\nWall time: 13min 48s\n" ], [ "label, results, register, model, final_features = ml.train_score_model(best_model, df_train, df_test, metrics)\n\nml.results_add_all(results_grid, label, results, register)\n\nresults_grid.style.format(ml.results_format)", "[19:45:53] WARNING: /Users/travis/build/dmlc/xgboost/src/learner.cc:573: \nParameters: { \"criterion\", \"params\" } might not be used.\n\n This may not be accurate due to some parameters are only used in language bindings but\n passed down to XGBoost core. Or some parameters are not used but slip through this\n verification. 
Please open an issue if you find above cases.\n\n\n" ], [ "importances_groups = {\n '|kmeans': 'sum',\n}\n\nml.plot_features_importances(model, final_features, importances_groups, True, False)", "Group: |kmeans - features: 2 - raw features: 10\n34% \t energy|rolling.mean#30\n26% \t energy|rolling.mean#330\n9% \t energy|rolling.mean#360\n6% \t distance|kmeans\n4% \t z\n4% \t energy|rolling.mean#180\n3% \t energy|rolling.mean#90\n2% \t y\n2% \t x\n2% \t energy_neighbours|rolling.mean#330\n1% \t energy|rolling.mean#30||ratio||energy|rolling.mean#360\n1% \t days.since.last\n1% \t energy|rolling.mean#180||ratio||energy|rolling.mean#360\n1% \t energy_neighbours|rolling.mean#180\n1% \t energy|rolling.mean#90||ratio||energy|rolling.mean#360\n1% \t energy_neighbours|rolling.mean#360\n1% \t energy|rolling.mean#330||ratio||energy|rolling.mean#360\n1% \t energy_neighbours|rolling.mean#30\n1% \t iforest_score\n1% \t energy_neighbours|rolling.mean#90\n0% \t cluster|kmeans\n" ], [ "shap_values = ml.get_shap_values(df_test[level_0_features], model, None)\n\nml.plot_beeswarm(shap_values, final_features)", "_____no_output_____" ], [ "residuals = ml.get_residuals(df_test, level_0_features, target, model, results['Threshold'])", "_____no_output_____" ], [ "residuals['tp'].sum(), residuals['fp'].sum(), residuals['fn'].sum()", "_____no_output_____" ], [ "notes = {}\n\nnotes['number_features'] = len(level_0_features)\n\nfor name_df, df in {'train': df_train, 'test': df_test}.items():\n\n notes[name_df + '_lenght'] = len(df)\n notes[name_df + '_balance'] = df[target].sum() / len(df)\n notes[name_df + '_number_id'] = df[pid].nunique()\n notes[name_df + '_number_time'] = df[time_ref].nunique()\n notes[name_df + '_min_time'] = df[time_ref].min().strftime(\"%Y%m%d\")\n notes[name_df + '_max_time'] = df[time_ref].max().strftime(\"%Y%m%d\")\n \n for feature in ['x', 'y', 'z']:\n notes[name_df + '_' + feature] = list(np.sort(df[feature].unique().astype(str)))\n\nml.save_model(model, label, results, df_train, notes)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c52edab773bf792c32927fe801883ac05bb6299d
98,784
ipynb
Jupyter Notebook
10_45p_Copy_of_latest_sun_mar_01_unit_2_sprint_3.ipynb
LambdaTheda/DS-Unit-2-Linear-Models
cc910e08b45bab7ac4a5cd06c59646203422d190
[ "MIT" ]
null
null
null
10_45p_Copy_of_latest_sun_mar_01_unit_2_sprint_3.ipynb
LambdaTheda/DS-Unit-2-Linear-Models
cc910e08b45bab7ac4a5cd06c59646203422d190
[ "MIT" ]
null
null
null
10_45p_Copy_of_latest_sun_mar_01_unit_2_sprint_3.ipynb
LambdaTheda/DS-Unit-2-Linear-Models
cc910e08b45bab7ac4a5cd06c59646203422d190
[ "MIT" ]
null
null
null
49.318023
13,856
0.571408
[ [ [ "<a href=\"https://colab.research.google.com/github/LambdaTheda/DS-Unit-2-Linear-Models/blob/master/10_45p_Copy_of_latest_sun_mar_01_unit_2_sprint_3.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "_Lambda School Data Science, Unit 2_\n\n# Applied Modeling Sprint Challenge: Predict Chicago food inspections 🍕", "_____no_output_____" ], [ "For this Sprint Challenge, you'll use a dataset with information from inspections of restaurants and other food establishments in Chicago from January 2010 to March 2019. \n\n[See this PDF](https://data.cityofchicago.org/api/assets/BAD5301B-681A-4202-9D25-51B2CAE672FF) for descriptions of the data elements included in this dataset.\n\nAccording to [Chicago Department of Public Health — Food Protection Services](https://www.chicago.gov/city/en/depts/cdph/provdrs/healthy_restaurants/svcs/food-protection-services.html), \"Chicago is home to 16,000 food establishments like restaurants, grocery stores, bakeries, wholesalers, lunchrooms, mobile food vendors and more. Our business is food safety and sanitation with one goal, to prevent the spread of food-borne disease. We do this by inspecting food businesses, responding to complaints and food recalls.\" ", "_____no_output_____" ], [ "#### Your challenge: Predict whether inspections failed\n\nThe target is the `Fail` column.\n\n- When the food establishment failed the inspection, the target is `1`.\n- When the establishment passed, the target is `0`.", "_____no_output_____" ], [ "#### Run this cell to install packages in Colab:", "_____no_output_____" ] ], [ [ "%%capture\nimport sys\n\nif 'google.colab' in sys.modules:\n # Install packages in Colab\n !pip install category_encoders==2.*\n !pip install eli5\n !pip install pandas-profiling==2.*\n !pip install pdpbox\n !pip install shap", "_____no_output_____" ] ], [ [ "#### Run this cell to load the data:", "_____no_output_____" ] ], [ [ "import pandas as pd\n\ntrain_url = 'https://drive.google.com/uc?export=download&id=13_tP9JpLcZHSPVpWcua4t2rY44K_s4H5'\ntest_url = 'https://drive.google.com/uc?export=download&id=1GkDHjsiGrzOXoF_xcYjdzBTSjOIi3g5a'\n\ntrain = pd.read_csv(train_url)\ntest = pd.read_csv(test_url)\n\nassert train.shape == (51916, 17)\nassert test.shape == (17306, 17)", "_____no_output_____" ] ], [ [ "### Part 1: Preprocessing\n\nYou may choose which features you want to use, and whether/how you will preprocess them. If you use categorical features, you may use any tools and techniques for encoding.\n\n_To earn a score of 3 for this part, find and explain leakage. The dataset has a feature that will give you an ROC AUC score > 0.90 if you process and use the feature. Find the leakage and explain why the feature shouldn't be used in a real-world model to predict the results of future inspections._\n\n### Part 2: Modeling\n\n**Fit a model** with the train set. (You may use scikit-learn, xgboost, or any other library.) Use cross-validation or do a three-way split (train/validate/test) and **estimate your ROC AUC** validation score.\n\nUse your model to **predict probabilities** for the test set. **Get an ROC AUC test score >= 0.60.**\n\n_To earn a score of 3 for this part, get an ROC AUC test score >= 0.70 (without using the feature with leakage)._\n\n\n### Part 3: Visualization\n\nMake visualizations for model interpretation. (You may use any libraries.) 
Choose two of these types:\n\n- Confusion Matrix\n- Permutation Importances\n- Partial Dependence Plot, 1 feature isolation\n- Partial Dependence Plot, 2 features interaction\n- Shapley Values\n\n_To earn a score of 3 for this part, make four of these visualization types._", "_____no_output_____" ], [ "## Part 1: Preprocessing\n\n> You may choose which features you want to use, and whether/how you will preprocess them. If you use categorical features, you may use any tools and techniques for encoding.", "_____no_output_____" ] ], [ [ "# Exploratory Data Analyses", "_____no_output_____" ], [ "train.describe()", "_____no_output_____" ], [ "test.describe()", "_____no_output_____" ], [ "train.head()", "_____no_output_____" ], [ "test.head()", "_____no_output_____" ], [ "# check Nulls\ntrain.isnull().sum()", "_____no_output_____" ], [ "test.isnull().sum()", "_____no_output_____" ], [ "# check 'Fail' class imbalance via a Plot: Pass Vs Fail\n(train['Fail'].map({0: 'Passed', 1: 'Failed'}).value_counts(normalize=True) * 100)\\\n .plot.bar(title='Percentages of Inspection Results', figsize=(10, 5))", "_____no_output_____" ], [ "# Drop 'AKA Name' column in train set since I will use the \"DBA Name\" column and the former has nulls while the latter does not,\n# and both serve similar enough functions as a business identifier for my purposes\ntrain = train.drop(columns=['AKA Name'])", "_____no_output_____" ], [ "y = train['Fail']\ny.unique()", "_____no_output_____" ], [ "'''\nWhich evaluation measure is appropriate to use for a classification model with imbalanced classes?\nPrecision metric tells us how many predicted samples are relevant i.e. our mistakes into classifying sample as a correct one if it's not true. this metric is a good choice for the imbalanced classification scenario.May 9, 2019\n\nMetrics for Imbalanced Classification - Towards Data Science\n'''\n# May use PRECISION METRIC? (instead of Accuracy in ntbk) for validation because our 2 class ratio is about 3:1; ~significant imbalance\n\n# TEST INSTRUCTION: estimate your ROC AUC validation score\n\n# find how many of Pass and Failed in our train['Fail']\ny.value_counts(normalize=True)", "_____no_output_____" ], [ "import pandas as pd \n# from LS_DSPT4_231.ipynb (Mod 1)\n\n'''\nNext, do a time-based split:\nBrief Description: This dataset contains information from inspections of restaurants and other\nfood establishments in Chicago from January 1, 2010 to the present. 
\n'''\n\ntrain['Inspection Date'] = pd.to_datetime(train['Inspection Date'])", "_____no_output_____" ], [ "# TRIED to split val from train, but got AttributeError: Can only use .dt accessor with datetimelike values..\n# may have to feature engineer Inspection Date to parse out only date!\n\n# Attempt 2: Parsing out only YEAR from train['Inspection Date'] - works!\ntrain['Inspection Date'] = pd.to_datetime(train['Inspection Date'])\ntrain['Inspection Year'] = train['Inspection Date'].dt.year\n\ntest['Inspection Date'] = pd.to_datetime(test['Inspection Date'])\ntest['Inspection Year'] = test['Inspection Date'].dt.year\n", "_____no_output_____" ], [ "# split_train = train[train['Inspection Date'].dt.year <= 2016]\n# val = train[train['Inspection Date'].dt.year > 2017]\n\n# Check if ~80 % train; 20% val split was chosen\n#split_train.shape, val.shape", "_____no_output_____" ], [ "\n# May fine tune split using months additionally\n'''\nval.value_counts(normalize=True) (gives err df obj has no val_cnts attrib..)\n\n# check 'Fail' class imbalance via a Plot: Pass Vs Fail\n# ?!?!\n# (train_split['Inspection Year'].map({ ('Inspection Year'<= 2016): 'train_split', 1: 'Failed'}).value_counts(normalize=True) * 100)\\\n# .plot.bar(title='Percentages of Inspection Results', figsize=(10, 5))\n'''", "_____no_output_____" ], [ "train['Any Failed'] = train.groupby('Address')['Fail'].transform(lambda x: int((x == 1).any()))\ntest['Any Failed'] = test.groupby('Address')['Fail'].transform(lambda x: int((x == 1).any()))", "_____no_output_____" ] ], [ [ "## Part 2: Modeling\n\n> **Fit a model** with the train set. (You may use scikit-learn, xgboost, or any other library.) Use cross-validation or do a three-way split (train/validate/test) and **estimate your ROC AUC** validation score.\n>\n> Use your model to **predict probabilities** for the test set. **Get an ROC AUC test score >= 0.60.**", "_____no_output_____" ] ], [ [ "#ATTEMPT 2: getting invalid type promotion err\n\n# Try a shallow decision tree as a fast, first model\n\nimport category_encoders as ce\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import train_test_split\n\ntarget = 'Fail'\n# features = ['Inspection Type', 'Any Failed', 'Facility Type', 'Latitude', 'Longitude']\nfeatures = ['Inspection Type', 'Zip', 'Any Failed', 'License #', 'Facility Type', 'Latitude', 'Longitude']\n\nX_train, X_test, y_train, y_test = train_test_split(train[features], train[target])\n\npipeline = make_pipeline(\n ce.OrdinalEncoder(),\n SimpleImputer(strategy='most_frequent'),\n RandomForestClassifier()\n)\n\npipeline.fit(X_train, y_train)\nacc_score = pipeline.score(test[features], test[target])\nra_score = roc_auc_score(test[target], pipeline.predict(test[features]))\n\nprint(f'Test Accuracy: {pipeline.score(X_test, y_test)}')\nprint(f'Test ROC AUC: {roc_auc_score(y_test, pipeline.predict(X_test))}\\n')\n\nprint(f'Val Accuracy: {acc_score}')\nprint(f'Val ROC AUC: {ra_score}')", "_____no_output_____" ] ], [ [ "## Part 3: Visualization\n\n> Make visualizations for model interpretation. (You may use any libraries.) 
Choose two of these types:\n>\n> - Permutation Importances\n> - Partial Dependence Plot, 1 feature isolation\n> - Partial Dependence Plot, 2 features interaction\n> - Shapley Values", "_____no_output_____" ] ], [ [ "#Perm Impt: https://colab.research.google.com/drive/1z1R0m3XsaZMjukynx2Ub-531Sh32xPln#scrollTo=QxhmJFxvKDbM (u2s3m3) ", "_____no_output_____" ], [ "# 1) PERMUTATION IMPORTANCES\n# a) just to peek at which features are important to our model, get feature importances\nrf = pipeline.named_steps['randomforestclassifier']\nimportances = pd.Series(rf.feature_importances_, X_train.columns)\n\n# Plot feature importances\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\nn = 20\nplt.figure(figsize=(10,n/2))\nplt.title(f'Top {n} features')\nimportances.sort_values()[-n:].plot.barh(color='grey');", "_____no_output_____" ], [ "# BEFORE: Sequence of the feature to be permuted: from Features Importance above, chose Latitude adn Inspection type columns/features to Permute\nimport numpy as np\nfor feature in ['Latitude', 'Inspection Type']:\n\n# PERMUTE\n X_train_permuted = X_train.copy() #copy whole df to submit all at once\n X_train_permuted[feature] = np.random.permutation(X_train[feature])\n\n X_test_permuted = X_test.copy()\n X_test_permuted[feature] = np.random.permutation(X_test[feature])\n\n score = pipeline.score(X_test, y_test)\n score_permuted = pipeline.score(X_test_permuted, y_test) #Calc. accuracy on the permuted val dataset\n\n print(f'Validation accuracy with {feature}: {score}')\n print(f'Validation accuracy with {feature} permuted: {score_permuted}')\n print(f'Permutation importance: {acc_score - score_permuted}\\n')", "Validation accuracy with Latitude: 0.7466676939671778\nValidation accuracy with Latitude permuted: 0.7516757839587025\nPermutation importance: 0.0610481268236851\n\nValidation accuracy with Inspection Type: 0.7466676939671778\nValidation accuracy with Inspection Type permuted: 0.6775560520841359\nPermutation importance: 0.13516785869825165\n\n" ], [ "#2) Shapley Values: SHAP Values (an acronym from SHapley Additive exPlanations) break down a prediction to show the impact of each feature.\n# from https://colab.research.google.com/drive/1r2VFMtBAt3sLVIQfsMWyQXt8hB9gziRA#scrollTo=Ep1aBVpVcrDj (FINAL VERSION 234 u2s3m4.ipynb)\n\nimport category_encoders as ce\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.pipeline import make_pipeline\nfrom xgboost import XGBClassifier\n\nprocessor = make_pipeline(\n ce.OrdinalEncoder(), \n SimpleImputer(strategy='median')\n)\n\nval = train[train['Inspection Date'].dt.year > 2017]\nX_val = val[features]\ny_val = val[target]\n\nX_train_processed = processor.fit_transform(X_train)\nX_val_processed = processor.transform(X_val)\n\neval_set = [(X_train_processed, y_train), \n (X_val_processed, y_val)]\n\nmodel = XGBClassifier(n_estimators=1000, n_jobs=-1)\nmodel.fit(X_train_processed, y_train, eval_set=eval_set, eval_metric='auc', \n early_stopping_rounds=10)", "[0]\tvalidation_0-auc:0.760194\tvalidation_1-auc:0.767082\nMultiple eval metrics have been passed: 'validation_1-auc' will be used for early stopping.\n\nWill train until validation_1-auc hasn't improved in 10 
rounds.\n[1]\tvalidation_0-auc:0.760194\tvalidation_1-auc:0.767082\n[2]\tvalidation_0-auc:0.773123\tvalidation_1-auc:0.781638\n[3]\tvalidation_0-auc:0.773123\tvalidation_1-auc:0.781638\n[4]\tvalidation_0-auc:0.773123\tvalidation_1-auc:0.781638\n[5]\tvalidation_0-auc:0.773209\tvalidation_1-auc:0.781871\n[6]\tvalidation_0-auc:0.773209\tvalidation_1-auc:0.781871\n[7]\tvalidation_0-auc:0.780834\tvalidation_1-auc:0.790041\n[8]\tvalidation_0-auc:0.780834\tvalidation_1-auc:0.790041\n[9]\tvalidation_0-auc:0.780834\tvalidation_1-auc:0.790041\n[10]\tvalidation_0-auc:0.780834\tvalidation_1-auc:0.790041\n[11]\tvalidation_0-auc:0.799053\tvalidation_1-auc:0.803101\n[12]\tvalidation_0-auc:0.799053\tvalidation_1-auc:0.803101\n[13]\tvalidation_0-auc:0.807124\tvalidation_1-auc:0.813122\n[14]\tvalidation_0-auc:0.807124\tvalidation_1-auc:0.813122\n[15]\tvalidation_0-auc:0.808165\tvalidation_1-auc:0.813673\n[16]\tvalidation_0-auc:0.807818\tvalidation_1-auc:0.812828\n[17]\tvalidation_0-auc:0.810407\tvalidation_1-auc:0.815963\n[18]\tvalidation_0-auc:0.811788\tvalidation_1-auc:0.817588\n[19]\tvalidation_0-auc:0.812834\tvalidation_1-auc:0.817827\n[20]\tvalidation_0-auc:0.811796\tvalidation_1-auc:0.817101\n[21]\tvalidation_0-auc:0.813876\tvalidation_1-auc:0.818093\n[22]\tvalidation_0-auc:0.814176\tvalidation_1-auc:0.818293\n[23]\tvalidation_0-auc:0.812997\tvalidation_1-auc:0.817542\n[24]\tvalidation_0-auc:0.813367\tvalidation_1-auc:0.817514\n[25]\tvalidation_0-auc:0.814796\tvalidation_1-auc:0.818592\n[26]\tvalidation_0-auc:0.815387\tvalidation_1-auc:0.818592\n[27]\tvalidation_0-auc:0.816879\tvalidation_1-auc:0.82183\n[28]\tvalidation_0-auc:0.819187\tvalidation_1-auc:0.821906\n[29]\tvalidation_0-auc:0.819069\tvalidation_1-auc:0.822053\n[30]\tvalidation_0-auc:0.819261\tvalidation_1-auc:0.822245\n[31]\tvalidation_0-auc:0.81938\tvalidation_1-auc:0.82342\n[32]\tvalidation_0-auc:0.819173\tvalidation_1-auc:0.822318\n[33]\tvalidation_0-auc:0.819857\tvalidation_1-auc:0.822312\n[34]\tvalidation_0-auc:0.820991\tvalidation_1-auc:0.826257\n[35]\tvalidation_0-auc:0.821016\tvalidation_1-auc:0.826126\n[36]\tvalidation_0-auc:0.821008\tvalidation_1-auc:0.825478\n[37]\tvalidation_0-auc:0.821315\tvalidation_1-auc:0.825337\n[38]\tvalidation_0-auc:0.82137\tvalidation_1-auc:0.825339\n[39]\tvalidation_0-auc:0.821592\tvalidation_1-auc:0.825598\n[40]\tvalidation_0-auc:0.823953\tvalidation_1-auc:0.826883\n[41]\tvalidation_0-auc:0.824232\tvalidation_1-auc:0.826762\n[42]\tvalidation_0-auc:0.824101\tvalidation_1-auc:0.826741\n[43]\tvalidation_0-auc:0.82418\tvalidation_1-auc:0.826959\n[44]\tvalidation_0-auc:0.824724\tvalidation_1-auc:0.827794\n[45]\tvalidation_0-auc:0.825857\tvalidation_1-auc:0.831428\n[46]\tvalidation_0-auc:0.825649\tvalidation_1-auc:0.830205\n[47]\tvalidation_0-auc:0.825709\tvalidation_1-auc:0.830597\n[48]\tvalidation_0-auc:0.825482\tvalidation_1-auc:0.830202\n[49]\tvalidation_0-auc:0.825659\tvalidation_1-auc:0.831238\n[50]\tvalidation_0-auc:0.825766\tvalidation_1-auc:0.831092\n[51]\tvalidation_0-auc:0.825795\tvalidation_1-auc:0.83119\n[52]\tvalidation_0-auc:0.826209\tvalidation_1-auc:0.831584\n[53]\tvalidation_0-auc:0.826333\tvalidation_1-auc:0.832066\n[54]\tvalidation_0-auc:0.826969\tvalidation_1-auc:0.832503\n[55]\tvalidation_0-auc:0.827237\tvalidation_1-auc:0.832022\n[56]\tvalidation_0-auc:0.827394\tvalidation_1-auc:0.832456\n[57]\tvalidation_0-auc:0.827636\tvalidation_1-auc:0.832671\n[58]\tvalidation_0-auc:0.828323\tvalidation_1-auc:0.833207\n[59]\tvalidation_0-auc:0.828319\tvalidation_1-auc:0.833054\n[60]\tvalidation_0-au
c:0.828421\tvalidation_1-auc:0.833281\n[61]\tvalidation_0-auc:0.828948\tvalidation_1-auc:0.834079\n[62]\tvalidation_0-auc:0.829003\tvalidation_1-auc:0.834083\n[63]\tvalidation_0-auc:0.829337\tvalidation_1-auc:0.834533\n[64]\tvalidation_0-auc:0.829378\tvalidation_1-auc:0.834507\n[65]\tvalidation_0-auc:0.829358\tvalidation_1-auc:0.834761\n[66]\tvalidation_0-auc:0.829341\tvalidation_1-auc:0.834803\n[67]\tvalidation_0-auc:0.830288\tvalidation_1-auc:0.835117\n[68]\tvalidation_0-auc:0.83047\tvalidation_1-auc:0.834723\n[69]\tvalidation_0-auc:0.830489\tvalidation_1-auc:0.834877\n[70]\tvalidation_0-auc:0.830832\tvalidation_1-auc:0.835552\n[71]\tvalidation_0-auc:0.830826\tvalidation_1-auc:0.835522\n[72]\tvalidation_0-auc:0.831347\tvalidation_1-auc:0.835945\n[73]\tvalidation_0-auc:0.831416\tvalidation_1-auc:0.835988\n[74]\tvalidation_0-auc:0.831529\tvalidation_1-auc:0.836144\n[75]\tvalidation_0-auc:0.831706\tvalidation_1-auc:0.835964\n[76]\tvalidation_0-auc:0.831795\tvalidation_1-auc:0.836362\n[77]\tvalidation_0-auc:0.831854\tvalidation_1-auc:0.836527\n[78]\tvalidation_0-auc:0.831857\tvalidation_1-auc:0.836414\n[79]\tvalidation_0-auc:0.83194\tvalidation_1-auc:0.836528\n[80]\tvalidation_0-auc:0.831909\tvalidation_1-auc:0.836495\n[81]\tvalidation_0-auc:0.832001\tvalidation_1-auc:0.836659\n[82]\tvalidation_0-auc:0.832294\tvalidation_1-auc:0.836775\n[83]\tvalidation_0-auc:0.832282\tvalidation_1-auc:0.836767\n[84]\tvalidation_0-auc:0.832669\tvalidation_1-auc:0.837159\n[85]\tvalidation_0-auc:0.832625\tvalidation_1-auc:0.837201\n[86]\tvalidation_0-auc:0.832642\tvalidation_1-auc:0.837049\n[87]\tvalidation_0-auc:0.832896\tvalidation_1-auc:0.837295\n[88]\tvalidation_0-auc:0.832986\tvalidation_1-auc:0.837338\n[89]\tvalidation_0-auc:0.83306\tvalidation_1-auc:0.83746\n[90]\tvalidation_0-auc:0.833237\tvalidation_1-auc:0.837787\n[91]\tvalidation_0-auc:0.833483\tvalidation_1-auc:0.8385\n[92]\tvalidation_0-auc:0.8335\tvalidation_1-auc:0.838511\n[93]\tvalidation_0-auc:0.833607\tvalidation_1-auc:0.8384\n[94]\tvalidation_0-auc:0.833643\tvalidation_1-auc:0.838595\n[95]\tvalidation_0-auc:0.834005\tvalidation_1-auc:0.839055\n[96]\tvalidation_0-auc:0.834198\tvalidation_1-auc:0.839203\n[97]\tvalidation_0-auc:0.834289\tvalidation_1-auc:0.839293\n[98]\tvalidation_0-auc:0.834469\tvalidation_1-auc:0.839468\n[99]\tvalidation_0-auc:0.834599\tvalidation_1-auc:0.839554\n[100]\tvalidation_0-auc:0.834666\tvalidation_1-auc:0.839481\n[101]\tvalidation_0-auc:0.834735\tvalidation_1-auc:0.839569\n[102]\tvalidation_0-auc:0.834826\tvalidation_1-auc:0.83958\n[103]\tvalidation_0-auc:0.834847\tvalidation_1-auc:0.839607\n[104]\tvalidation_0-auc:0.834963\tvalidation_1-auc:0.839581\n[105]\tvalidation_0-auc:0.835096\tvalidation_1-auc:0.839952\n[106]\tvalidation_0-auc:0.835304\tvalidation_1-auc:0.840202\n[107]\tvalidation_0-auc:0.83536\tvalidation_1-auc:0.840348\n[108]\tvalidation_0-auc:0.835396\tvalidation_1-auc:0.840543\n[109]\tvalidation_0-auc:0.835536\tvalidation_1-auc:0.840703\n[110]\tvalidation_0-auc:0.835691\tvalidation_1-auc:0.841171\n[111]\tvalidation_0-auc:0.835743\tvalidation_1-auc:0.841335\n[112]\tvalidation_0-auc:0.835749\tvalidation_1-auc:0.841261\n[113]\tvalidation_0-auc:0.835825\tvalidation_1-auc:0.841298\n[114]\tvalidation_0-auc:0.835827\tvalidation_1-auc:0.841147\n[115]\tvalidation_0-auc:0.835972\tvalidation_1-auc:0.841273\n[116]\tvalidation_0-auc:0.836044\tvalidation_1-auc:0.841249\n[117]\tvalidation_0-auc:0.836075\tvalidation_1-auc:0.841311\n[118]\tvalidation_0-auc:0.836081\tvalidation_1-auc:0.8411\n[119]\tvalidation_0-auc:0.83620
5\tvalidation_1-auc:0.841158\n[120]\tvalidation_0-auc:0.836284\tvalidation_1-auc:0.841226\n[121]\tvalidation_0-auc:0.836436\tvalidation_1-auc:0.841326\nStopping. Best iteration:\n[111]\tvalidation_0-auc:0.835743\tvalidation_1-auc:0.841335\n\n" ], [ "from sklearn.metrics import roc_auc_score\nX_test_processed = processor.transform(X_test)\nclass_index = 1\ny_pred_proba = model.predict_proba(X_test_processed)[:, class_index]\nprint(f'Test ROC AUC for class {class_index}:')\nprint(roc_auc_score(y_test, y_pred_proba)) # Ranges from 0-1, higher is better", "Test ROC AUC for class 1:\n0.8293915652428773\n" ], [ "import shap\n\nexplainer = shap.TreeExplainer(model)\nrow_processed = processor.transform(row)\nshap_values = explainer.shap_values(row_processed)\n\nshap.initjs()\nshap.force_plot(\n base_value=explainer.expected_value, \n shap_values=shap_values, \n features=row, \n link='logit' # For classification, this shows predicted probabilities\n)", "Setting feature_perturbation = \"tree_path_dependent\" because no background data was given.\n" ], [ "", "_____no_output_____" ], [ "#******************** FROM 1ST TEST TAKING **************************", "_____no_output_____" ], [ "# 2) CONFUSION MATRIX -NM", "_____no_output_____" ], [ "#2) #Partial Dependence Plot, 1 feature interaction\n\n'''\nLater, when you save matplotlib images to include in blog posts or web apps,\nincrease the dots per inch (double it), so the text isn't so fuzzy\n'''\n\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.dpi'] = 72", "_____no_output_____" ], [ "from sklearn.metrics import r2_score\nfrom xgboost import XGBRegressor\n\ngb = make_pipeline(\n ce.OrdinalEncoder(), \n XGBRegressor(n_estimators=200, objective='reg:squarederror', n_jobs=-1)\n)\n\ngb.fit(X_train, y_train)\ny_pred = gb.predict(X_val)\nprint('Gradient Boosting R^2', r2_score(y_val, y_pred))", "_____no_output_____" ], [ "from pdpbox.pdp import pdp_isolate, pdp_plot\n\nfeature = 'DBA'\n\nisolated = pdp_isolate(\n model = gb,\n dataset=X_val,\n model_features=X_val.columns,\n feature=feature\n)", "_____no_output_____" ], [ "pdp_plot(isolated, feature_name=feature);", "_____no_output_____" ], [ "pdp_plot(isolated, feature_name=feature);", "_____no_output_____" ], [ "from pdpbox.pdp import pdp_interact, pdp_interact_plot\nimport category_encoders as ce\nimport seaborn as sns\nfrom sklearn.ensemble import RandomForestClassifier\n\ntarget = 'Fail'\nfeatures = df.columns.drop(['Fail'])\n\nX = train[features]\ny = train[target]\n\n# Use Ordinal Encoder, outside of a pipeline\nencoder = ce.OrdinalEncoder()\nX_encoded = encoder.fit_transform(X)\n\nmodel = RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)\nmodel.fit(X_encoded, y)", "_____no_output_____" ], [ "# Use Pdpbox\n%matplotlib inline\nimport matplotlib.pyplot as plt\nfrom pdpbox import pdp\nfeature = 'Violation'\npdp_dist = pdp.pdp_isolate(model=model, dataset=X_encoded, model_features=features, feature=feature)\npdp.pdp_plot(pdp_dist, feature);", "_____no_output_____" ], [ "# Look at the encoder's mappings\nencoder.mapping", "_____no_output_____" ], [ "pdp.pdp_plot(pdp_dist, feature)\n\n# Manually change the xticks labels\nplt.xticks([1, 2], ['Violations', 'Fail']);", "_____no_output_____" ], [ "# Let's automate it\n\nfeature = 'Violation'\nfor item in encoder.mapping:\n if item['col'] == feature:\n feature_mapping = item['mapping']\n \nfeature_mapping = feature_mapping[feature_mapping.index.dropna()]\ncategory_names = feature_mapping.index.tolist()\ncategory_codes = 
feature_mapping.values.tolist()", "_____no_output_____" ], [ "pdp.pdp_plot(pdp_dist, feature)\n\n# Automatically change the xticks labels\nplt.xticks(category_codes, category_names);", "_____no_output_____" ], [ "pdp.pdp_plot(pdp_dist, feature)\n\n# Automatically change the xticks labels\nplt.xticks(category_codes, category_names);", "_____no_output_____" ], [ "pdp = interaction.pdp.pivot_table(\n values='preds', \n columns=features[0], # First feature on x axis\n index=features[1] # Next feature on y axis\n)[::-1] # Reverse the index order so y axis is ascending\n\npdp = pdp.rename(columns=dict(zip(category_codes, category_names)))\nplt.figure(figsize=(10,8))\nsns.heatmap(pdp, annot=True, fmt='.2f', cmap='viridis')\nplt.title('Partial Dependence of Inspection Failure, on Violation & Fails');", "_____no_output_____" ], [ "#Shapley\n# Assign to X, y\nfeatures = ['Risk', 'Violations', 'Inspection Type']\ntarget = 'Fail'\nX_train = train[features]\ny_train = train[target]\nX_test = test[features]\ny_test = test[target]", "_____no_output_____" ], [ "from scipy.stats import randint, uniform\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import RandomizedSearchCV\n\nparam_distributions = { \n 'n_estimators': randint(50, 500), \n 'max_depth': [5, 10, 15, 20, None], \n 'max_features': uniform(0, 1), \n}\n\nsearch = RandomizedSearchCV(\n RandomForestRegressor(random_state=42), #want CLassifier though?\n param_distributions=param_distributions, \n n_iter=5, \n cv=2, \n scoring='neg_mean_absolute_error', \n verbose=10, \n return_train_score=True, \n n_jobs=-1, \n random_state=42\n)\n\nsearch.fit(X_train, y_train);", "_____no_output_____" ], [ "print('Best hyperparameters', search.best_params_)\nprint('Cross-validation MAE', -search.best_score_)\nmodel = search.best_estimator_", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c52f114abab027aeba5705925ba86803346e2a84
72,224
ipynb
Jupyter Notebook
src/CMB_fit.ipynb
forero/GUASA2015
976b4ed1d6df8e1896462b68153ce9a51411dd23
[ "MIT" ]
null
null
null
src/CMB_fit.ipynb
forero/GUASA2015
976b4ed1d6df8e1896462b68153ce9a51411dd23
[ "MIT" ]
null
null
null
src/CMB_fit.ipynb
forero/GUASA2015
976b4ed1d6df8e1896462b68153ce9a51411dd23
[ "MIT" ]
null
null
null
322.428571
35,226
0.921051
[ [ [ "%pylab inline\nimport os\nfrom astroML.decorators import pickle_results", "Populating the interactive namespace from numpy and matplotlib\n" ], [ "def make_pow_spec(omch2=0.112, omk=0.0):\n ! rm -f vary.ini\n ! rm -f input.ini\n ! rm -f test_scalCls.dat\n omega_m = 'echo \"omch2 = {}\" >> vary.ini'.format(omch2)\n omega_k = 'echo \"omk = {}\" >> vary.ini'.format(omk)\n os.system(omega_m)\n os.system(omega_k)\n ! cat vary.ini fixed_params.ini > input.ini\n ! /home/forero/github/pycamb/camb/./camb input.ini > output.dat\n data = loadtxt(\"test_scalCls.dat\")\n return data[:,0], data[:,1]\n\ndef load_planck_data():\n data = loadtxt(\"../data/COM_PowerSpect_CMB_R1.10.txt\")\n return data[:,0], data[:,3], data[:,4]", "_____no_output_____" ], [ "ll, powspec = make_pow_spec(omch2=0.100, omk=0.1)\nll_obs, powspec_obs, sigma_powspec_obs = load_planck_data()", "_____no_output_____" ], [ "fig = plt.figure(figsize=(10, 8.0))\nscatter(ll_obs, powspec_obs,s=50)\nplot(ll, powspec, label=\" o_mh2={} o_k={}\".format(0.100,0.1))\nxlabel(\"l\")\nylabel(\"Power\")\nlegend()", "_____no_output_____" ], [ "ll, powspec = make_pow_spec()\nll_obs, powspec_obs, sigma_powspec_obs = load_planck_data()", "_____no_output_____" ], [ "fig = plt.figure(figsize=(10, 8.0))\nscatter(ll_obs, powspec_obs,s=50)\nplot(ll, powspec)\nplot(ll, powspec, label=\" o_mh2={} o_k={}\".format(0.112,0.0))\nxlabel(\"l\")\nylabel(\"Power\")\nlegend()", "_____no_output_____" ], [ "def compute_logL(beta):\n ll, powspec = make_pow_spec(omch2=beta[0],omk=beta[1])\n ll_obs, powspec_obs, powspec_sigma_obs = load_planck_data() \n mask1 = np.in1d(ll, ll_obs)\n mask2 = np.in1d(ll_obs, ll)\n return - sum( 0.5 *((powspec[mask1] - powspec_obs[mask2])/powspec_sigma_obs[mask2])**2)", "_____no_output_____" ], [ "#------------------------------------------------------------\n# Define a function to compute (and save to file) the log-likelihood\n@pickle_results('cmb_power.pkl')\ndef compute_cmb_power(Nbins=10):\n omegaM = np.linspace(0.05, 0.15, Nbins)\n omegaK = np.linspace(-0.1, 0.1, Nbins)\n\n logL = np.empty((Nbins, Nbins))\n\n for i in range(len(omegaM)):\n print '%i / %i' % (i + 1, len(omegaM))\n for j in range(len(omegaK)):\n logL[i, j] = compute_logL([omegaM[i], omegaK[j]])\n\n return omegaM, omegaK, logL\n\nomegaM, omegaK, res = compute_cmb_power()\nres -= np.max(res)", "@pickle_results: using precomputed results from 'cmb_power.pkl'\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c52f21c2ff2d156e7a9fbd33bc1f5a927f1dd60e
2,649
ipynb
Jupyter Notebook
Python/Day 20/Geo-coding and Places AP with Google Maps.ipynb
georgethedeveloper77/30DAYS-of-Python
f9319ede6d871b00569d674fc4042fbb2ad6c7d5
[ "MIT" ]
null
null
null
Python/Day 20/Geo-coding and Places AP with Google Maps.ipynb
georgethedeveloper77/30DAYS-of-Python
f9319ede6d871b00569d674fc4042fbb2ad6c7d5
[ "MIT" ]
3
2021-06-08T22:12:53.000Z
2022-03-12T00:47:44.000Z
Python/Day 20/Geo-coding and Places AP with Google Maps.ipynb
georgethedeveloper77/30DAYS-of-Python
f9319ede6d871b00569d674fc4042fbb2ad6c7d5
[ "MIT" ]
null
null
null
23.236842
162
0.547376
[ [ [ "### First steps\n1. google cloud\n2. Create project inside google cloud\n3. Activate our API Services -> Geocoding Api & Places API\n4. Get API Key & restrict\n ", "_____no_output_____" ] ], [ [ "api_key = \"AIzaSyDD1CyceDFrhr3kzJkdHMFHQYaws7qwdyI\"\n", "_____no_output_____" ] ], [ [ "Client\n1. Google Maps API Docs\n2. Geocoding API\n3. Places API \n", "_____no_output_____" ] ], [ [ "from urllib.parse import urlencode", "_____no_output_____" ], [ "data_type = 'json'\nendpoint = f\"https://maps.googleapis.com/maps/api/geocode/{data_type}\"\nparams = {\"address\" : \"1600 Amphitheatre Parkway, Mountain+View, CA\", \"key\": api_key}\nurl_params = urlencode(params)\n\nurl = f\"{endpoint}?{url_params}\"\nprint(url)", "https://maps.googleapis.com/maps/api/geocode/json?address=1600+Amphitheatre+Parkway%2C+Mountain%2BView%2C+CA&key=AIzaSyDD1CyceDFrhr3kzJkdHMFHQYaws7qwdyI\n" ], [ "def extract_lat_lng(address_or_postalcode, data_type = \"json\"):\n endpoint = f\"https://maps.googleapis.com/maps/api/geocode/{data_type}\"\n params = {\"address\" : \"1600 Amphitheatre Parkway, Mountain+View, CA\", \"key\": api_key}\n url_params = urlencode(params)\n url = f\"{endpoint}?{url_params}\"\n return url\n", "_____no_output_____" ], [ "extract_lat_lng(\"1600 Amphitheatre Parkway, Mountain+View, CA\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
c52f22a99a1adfc41827fa33350d6a38f46a5b50
8,125
ipynb
Jupyter Notebook
Supporting_Notebooks/Converting-Multivariate-Equations-to-Univariate.ipynb
cxlldhty/Kalman-and-Bayesian-Filters-in-Python
ebfb45ba4115ff5a177d42b22182f8e42deb1e24
[ "CC-BY-4.0" ]
1
2019-09-03T00:47:42.000Z
2019-09-03T00:47:42.000Z
Supporting_Notebooks/Converting-Multivariate-Equations-to-Univariate.ipynb
cxlldhty/Kalman-and-Bayesian-Filters-in-Python
ebfb45ba4115ff5a177d42b22182f8e42deb1e24
[ "CC-BY-4.0" ]
null
null
null
Supporting_Notebooks/Converting-Multivariate-Equations-to-Univariate.ipynb
cxlldhty/Kalman-and-Bayesian-Filters-in-Python
ebfb45ba4115ff5a177d42b22182f8e42deb1e24
[ "CC-BY-4.0" ]
1
2020-01-08T16:30:18.000Z
2020-01-08T16:30:18.000Z
43.918919
642
0.567385
[ [ [ "#format the book\n%matplotlib inline\nfrom __future__ import division, print_function\nimport sys\nsys.path.insert(0, '..')\nimport book_format\nbook_format.set_style()", "_____no_output_____" ] ], [ [ "# Converting the Multivariate Equations to the Univariate Case\n\nThe multivariate Kalman filter equations do not resemble the equations for the univariate filter. However, if we use one dimensional states and measurements the equations do reduce to the univariate equations. This section will provide you with a strong intuition into what the Kalman filter equations are actually doing. While reading this section is not required to understand the rest of the book, I recommend reading this section carefully as it should make the rest of the material easier to understand.\n\nHere are the multivariate equations for the prediction. \n\n$$\n\\begin{aligned}\n\\mathbf{\\bar{x}} &= \\mathbf{F x} + \\mathbf{B u} \\\\\n\\mathbf{\\bar{P}} &= \\mathbf{FPF}^\\mathsf{T} + \\mathbf Q\n\\end{aligned}\n$$\n\nFor a univariate problem the state $\\mathbf x$ only has one variable, so it is a $1\\times 1$ matrix. Our motion $\\mathbf{u}$ is also a $1\\times 1$ matrix. Therefore, $\\mathbf{F}$ and $\\mathbf B$ must also be $1\\times 1$ matrices. That means that they are all scalars, and we can write\n\n$$\\bar{x} = Fx + Bu$$\n\nHere the variables are not bold, denoting that they are not matrices or vectors. \n\nOur state transition is simple - the next state is the same as this state, so $F=1$. The same holds for the motion transition, so, $B=1$. Thus we have\n\n$$x = x + u$$\n\nwhich is equivalent to the Gaussian equation from the last chapter\n\n$$ \\mu = \\mu_1+\\mu_2$$\n\nHopefully the general process is clear, so now I will go a bit faster on the rest. We have\n\n$$\\mathbf{\\bar{P}} = \\mathbf{FPF}^\\mathsf{T} + \\mathbf Q$$\n\nAgain, since our state only has one variable $\\mathbf P$ and $\\mathbf Q$ must also be $1\\times 1$ matrix, which we can treat as scalars, yielding \n\n$$\\bar{P} = FPF^\\mathsf{T} + Q$$\n\nWe already know $F=1$. The transpose of a scalar is the scalar, so $F^\\mathsf{T} = 1$. This yields\n\n$$\\bar{P} = P + Q$$\n\nwhich is equivalent to the Gaussian equation of \n\n$$\\sigma^2 = \\sigma_1^2 + \\sigma_2^2$$\n\nThis proves that the multivariate prediction equations are performing the same math as the univariate equations for the case of the dimension being 1.\n\nThese are the equations for the update step:\n\n$$\n\\begin{aligned}\n\\mathbf{K}&= \\mathbf{\\bar{P}H}^\\mathsf{T} (\\mathbf{H\\bar{P}H}^\\mathsf{T} + \\mathbf R)^{-1} \\\\\n\\textbf{y} &= \\mathbf z - \\mathbf{H \\bar{x}}\\\\\n\\mathbf x&=\\mathbf{\\bar{x}} +\\mathbf{K\\textbf{y}} \\\\\n\\mathbf P&= (\\mathbf{I}-\\mathbf{KH})\\mathbf{\\bar{P}}\n\\end{aligned}\n$$\n\nAs above, all of the matrices become scalars. $H$ defines how we convert from a position to a measurement. Both are positions, so there is no conversion, and thus $H=1$. Let's substitute in our known values and convert to scalar in one step. The inverse of a 1x1 matrix is the reciprocal of the value so we will convert the matrix inversion to division.\n\n$$\n\\begin{aligned}\nK &=\\frac{\\bar{P}}{\\bar{P} + R} \\\\\ny &= z - \\bar{x}\\\\\nx &=\\bar{x}+Ky \\\\\nP &= (1-K)\\bar{P}\n\\end{aligned}\n$$\n\nBefore we continue with the proof, I want you to look at those equations to recognize what a simple concept these equations implement. The residual $y$ is nothing more than the measurement minus the prediction. 
The gain $K$ is scaled based on how certain we are about the last prediction versus how certain we are about the measurement. We choose a new state $x$ based on the old value of $x$ plus the scaled value of the residual. Finally, we update the uncertainty based on how certain we are about the measurement. Algorithmically this should sound exactly like what we did in the last chapter.\n\nLet's finish off the algebra to prove this. Recall that the univariate equations for the update step are:\n\n$$\n\\begin{aligned}\n\\mu &=\\frac{\\sigma_1^2 \\mu_2 + \\sigma_2^2 \\mu_1} {\\sigma_1^2 + \\sigma_2^2}, \\\\\n\\sigma^2 &= \\frac{1}{\\frac{1}{\\sigma_1^2} + \\frac{1}{\\sigma_2^2}}\n\\end{aligned}\n$$\n\nHere we will say that $\\mu_1$ is the state $x$, and $\\mu_2$ is the measurement $z$. Thus it follows that $\\sigma_1^2$ is the state uncertainty $P$, and $\\sigma_2^2$ is the measurement noise $R$. Let's substitute those in.\n\n$$\\begin{aligned} \\mu &= \\frac{Pz + Rx}{P+R} \\\\\n\\sigma^2 &= \\frac{1}{\\frac{1}{P} + \\frac{1}{R}}\n\\end{aligned}$$\n\nI will handle $\\mu$ first. The corresponding equation in the multivariate case is\n\n$$\n\\begin{aligned}\nx &= x + Ky \\\\\n&= x + \\frac{P}{P+R}(z-x) \\\\\n&= \\frac{P+R}{P+R}x + \\frac{Pz - Px}{P+R} \\\\\n&= \\frac{Px + Rx + Pz - Px}{P+R} \\\\\n&= \\frac{Pz + Rx}{P+R}\n\\end{aligned}\n$$\n\nNow let's look at $\\sigma^2$. The corresponding equation in the multivariate case is\n\n$$ \n\\begin{aligned}\nP &= (1-K)P \\\\\n&= (1-\\frac{P}{P+R})P \\\\\n&= (\\frac{P+R}{P+R}-\\frac{P}{P+R})P \\\\\n&= (\\frac{P+R-P}{P+R})P \\\\\n&= \\frac{RP}{P+R}\\\\\n&= \\frac{1}{\\frac{P+R}{RP}}\\\\\n&= \\frac{1}{\\frac{R}{RP} + \\frac{P}{RP}} \\\\\n&= \\frac{1}{\\frac{1}{P} + \\frac{1}{R}}\n\\quad\\blacksquare\n\\end{aligned}\n$$\n\nWe have proven that the multivariate equations are equivalent to the univariate equations when we only have one state variable. I'll close this section by recognizing one quibble - I hand waved my assertion that $H=1$ and $F=1$. In general we know this is not true. For example, a digital thermometer may provide measurements in volts, and we need to convert that to temperature, and we use $H$ to do that conversion. I left that issue out to keep the explanation as simple and streamlined as possible. It is very straightforward to add that generalization to the equations above, redo the algebra, and still have the same results.", "_____no_output_____" ] ] ]
[ "code", "markdown" ]
[ [ "code" ], [ "markdown" ] ]
c52f270da21f6ecdff6b9bc542fd9db6aa8be2d2
394,812
ipynb
Jupyter Notebook
notebook_examples/PlotStrain_example_v2.ipynb
ark0015/DetectorDesignSensitivities
0aee9d5dc5aa5dcd26d8ce5c3f3287ab91e6a0cf
[ "MIT" ]
null
null
null
notebook_examples/PlotStrain_example_v2.ipynb
ark0015/DetectorDesignSensitivities
0aee9d5dc5aa5dcd26d8ce5c3f3287ab91e6a0cf
[ "MIT" ]
null
null
null
notebook_examples/PlotStrain_example_v2.ipynb
ark0015/DetectorDesignSensitivities
0aee9d5dc5aa5dcd26d8ce5c3f3287ab91e6a0cf
[ "MIT" ]
3
2021-11-23T13:14:19.000Z
2022-02-03T00:10:03.000Z
411.2625
76,380
0.937874
[ [ [ "%load_ext autoreload\n%autoreload 2", "The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nfrom matplotlib import cm\nfrom matplotlib import rc\nimport os, sys\n\nimport astropy.constants as const\nimport astropy.units as u\nfrom astropy.cosmology import z_at_value\nfrom astropy.cosmology import WMAP9 as cosmo\nfrom fractions import Fraction\n\nimport hasasia.sensitivity as hassens\nimport hasasia.sim as hassim", "_____no_output_____" ], [ "rc('text',usetex=True)\nrc('font',**{'family':'serif','serif':['Times New Roman'],'size':14})#,'weight':'bold'})", "_____no_output_____" ], [ "current_path = os.getcwd()\nsplt_path = current_path.split(\"/\")\ntop_path_idx = splt_path.index('DetectorDesignSensitivities')\ntop_directory = \"/\".join(splt_path[0:top_path_idx+1])\nload_directory = top_directory + '/LoadFiles/InstrumentFiles/'\n\nsys.path.insert(0,top_directory + '/Functions')\nimport StrainandNoise_v2 as SnN\nimport SNRcalc_v3 as SnC\n\nLISA_Other_filedirectory = load_directory + 'LISA_Other/StrainFiles/'\nLISA_Neil_filedirectory = load_directory + 'LISA_Neil/StrainFiles/'\nLISA_ESA_filedirectory = load_directory + 'LISA_ESA/StrainFiles/'\nET_filedirectory = load_directory + 'EinsteinTelescope/StrainFiles/'\naLIGO_filedirectory = load_directory + 'aLIGO/StrainFiles/'\nNANOGrav_filedirectory = load_directory + 'NANOGrav/StrainFiles/' \nEOBdiff_filedirectory = top_directory + '/LoadFiles/DiffStrain/EOBdiff/'", "_____no_output_____" ], [ "fig_save_idx = splt_path.index('Research')\nfig_save_location = \"/\".join(splt_path[0:fig_save_idx+1])\nfig_save_location += '/paperfigs'", "_____no_output_____" ], [ "axissize = 14\nlabelsize = 16\nlegendsize = 12\nfigsize = (10,8)\ncolornorm = colors.Normalize(vmin=0.0, vmax=5.0)\nlinesize = 3", "_____no_output_____" ] ], [ [ "####################################################################\n# Initialize different instruments", "_____no_output_____" ], [ "### aLIGO", "_____no_output_____" ] ], [ [ "#aLIGO\naLIGO_filename = 'aLIGODesign.txt'\naLIGO_filelocation = aLIGO_filedirectory + aLIGO_filename\n\naLIGO = SnN.GroundBased('aLIGO')\naLIGO.Default_Setup(aLIGO_filelocation)", "_____no_output_____" ] ], [ [ "### Einstein Telescope", "_____no_output_____" ] ], [ [ "#Einstein Telescope\nET_filename = 'ET_B_data.txt'\nET_filelocation = ET_filedirectory + ET_filename\nET_data = np.loadtxt(ET_filelocation)\n\nET = SnN.GroundBased('ET')\nET.Default_Setup(ET_filelocation)", "_____no_output_____" ] ], [ [ "### Plots of Ground Detectors", "_____no_output_____" ] ], [ [ "fig = plt.figure(figsize=(10,5))\nplt.loglog(ET.fT,ET.h_n_f,label='Einsteing Telescope B')\nplt.loglog(aLIGO.fT,aLIGO.h_n_f,label='Advanced LIGO')\nplt.xlabel(r'Frequency $[Hz]$',fontsize = labelsize)\nplt.ylabel('Characteristic Strain',fontsize = labelsize)\nplt.legend()\n\n#########################\n#Save Figure to File\nfigname = '/Ground_Char_Strain.pdf'\nfigloc = fig_save_location+figname\nisitsavetime = False\nif isitsavetime:\n fig.savefig(figloc, bbox_inches='tight')\n \nplt.show()", "_____no_output_____" ] ], [ [ "### LISA Martin data", "_____no_output_____" ] ], [ [ "#Martin data\nLISA_Martin_filename = 'LISA_Allocation_S_h_tot.txt'\nLISA_Martin_filelocation = LISA_Other_filedirectory + LISA_Martin_filename\n\nLISA_Martin = SnN.SpaceBased('LISA_Martin')\nLISA_Martin.Load_Data(LISA_Martin_filelocation)\nLISA_Martin.Get_Strain()", 
"_____no_output_____" ] ], [ [ "### LISA Neil Cornish data", "_____no_output_____" ] ], [ [ "#Neil Cornish data\nLISA_Neil_filename = 'LISA_sensitivity.txt'\nLISA_Neil_filelocation = LISA_Neil_filedirectory + LISA_Neil_filename\n\nLISA_Neil = SnN.SpaceBased('LISA_Neil')\nLISA_Neil.Load_Data(LISA_Neil_filelocation)\nLISA_Neil.Get_Strain()", "_____no_output_____" ] ], [ [ "### LISA Larson Sensitivity Curve", "_____no_output_____" ] ], [ [ "#Larson Sensitivity Curve\nLISA_Larson_filename = 'scg_6981.dat'\nLISA_Larson_filelocation = LISA_Other_filedirectory + LISA_Larson_filename\n\nLISA_Larson = SnN.SpaceBased('LISA_Larson')\nLISA_Larson.Load_Data(LISA_Larson_filelocation)\nLISA_Larson.Get_Strain()", "_____no_output_____" ] ], [ [ "### Below is wrong, not strain", "_____no_output_____" ] ], [ [ "fig = plt.figure(figsize=(10,5))\nplt.loglog(LISA_Martin.fT,LISA_Martin.h_n_f,label='LISA Martin file')\nplt.loglog(LISA_Neil.fT,LISA_Neil.h_n_f,label='LISA Neil file')\nplt.loglog(LISA_Larson.fT,LISA_Larson.h_n_f**2/np.sqrt(LISA_Larson.fT),label='LISA Larson file')\nplt.xlabel(r'Frequency $[Hz]$',fontsize = labelsize)\nplt.ylabel('Characteristic Strain',fontsize = labelsize)\nplt.xlim([5e-6,3])\nplt.legend()\n\n#########################\n#Save Figure to File\nfigname = '/Ground_Char_Strain.pdf'\nfigloc = fig_save_location+figname\nisitsavetime = False\nif isitsavetime:\n fig.savefig(figloc, bbox_inches='tight')\n \nplt.show()", "_____no_output_____" ] ], [ [ "### Numerical Relativity from EOB subtraction", "_____no_output_____" ], [ "#### Diff0002", "_____no_output_____" ] ], [ [ "diff0002 = SnN.TimeDomain('diff0002')\ndiff0002.Load_Strain()\ndiff0002.Get_hf_from_hcross_hplus()", "_____no_output_____" ] ], [ [ "#### Diff0114", "_____no_output_____" ] ], [ [ "diff0114 = SnN.TimeDomain('diff0114')\ndiff0114.Load_Strain()\ndiff0114.Get_hf_from_hcross_hplus()", "_____no_output_____" ] ], [ [ "#### Diff0178", "_____no_output_____" ] ], [ [ "diff0178 = SnN.TimeDomain('diff0178')\ndiff0178.Load_Strain()\ndiff0178.Get_hf_from_hcross_hplus()", "_____no_output_____" ] ], [ [ "#### Diff0261", "_____no_output_____" ] ], [ [ "diff0261 = SnN.TimeDomain('diff0261')\ndiff0261.Load_Strain()\ndiff0261.Get_hf_from_hcross_hplus()", "_____no_output_____" ] ], [ [ "#### Diff0303", "_____no_output_____" ] ], [ [ "diff0303 = SnN.TimeDomain('diff0303')\ndiff0303.Load_Strain()\ndiff0303.Get_hf_from_hcross_hplus()", "_____no_output_____" ], [ "plt.figure()\nplt.plot(diff0002.t,diff0002.h_plus_t)\nplt.plot(diff0002.t,diff0002.h_cross_t)\nplt.show()", "_____no_output_____" ], [ "plt.figure(figsize=(10,5))\nplt.plot(diff0002.natural_f,diff0002.natural_h_f)\nplt.xscale('log')\nplt.yscale('log')\nplt.show()", "_____no_output_____" ] ], [ [ "### NANOGrav continuous wave sensitivity", "_____no_output_____" ] ], [ [ "#NANOGrav continuous wave sensitivity\nNANOGrav_background = 4e-16 # Unsubtracted GWB amplitude: 0,4e-16\nNANOGrav_dp = 0.95 #Detection Probablility: 0.95,0.5\nNANOGrav_fap = 0.0001 #False Alarm Probability: 0.05,0.003,0.001,0.0001\nNANOGrav_Tobs = 15 #Observation years: 15,20,25\n\nNANOGrav_filename = 'cw_simulation_Ared_' + str(NANOGrav_background) + '_dp_' + str(NANOGrav_dp) \\\n + '_fap_' + str(NANOGrav_fap) + '_T_' + str(NANOGrav_Tobs) + '.txt'\nNANOGrav_filelocation = NANOGrav_filedirectory + NANOGrav_filename\n\nNANOGrav_Mingarelli_no_GWB = SnN.PTA('NANOGrav_Mingarelli_no_GWB')\nNANOGrav_Mingarelli_no_GWB.Load_Data(NANOGrav_filelocation)", "_____no_output_____" ], [ "#NANOGrav continuous wave 
sensitivity\nNANOGrav_background_2 = 0 # Unsubtracted GWB amplitude: 0,4e-16\nNANOGrav_dp_2 = 0.95 #Detection Probablility: 0.95,0.5\nNANOGrav_fap_2 = 0.0001 #False Alarm Probability: 0.05,0.003,0.001,0.0001\nNANOGrav_Tobs_2 = 15 #Observation years: 15,20,25\n\nNANOGrav_filename_2 = 'cw_simulation_Ared_' + str(NANOGrav_background_2) + '_dp_' + str(NANOGrav_dp_2) \\\n + '_fap_' + str(NANOGrav_fap_2) + '_T_' + str(NANOGrav_Tobs_2) + '.txt'\nNANOGrav_filelocation_2 = NANOGrav_filedirectory + NANOGrav_filename_2\n\nNANOGrav_Mingarelli_GWB = SnN.PTA('NANOGrav_Mingarelli_GWB')\nNANOGrav_Mingarelli_GWB.Load_Data(NANOGrav_filelocation_2)", "_____no_output_____" ] ], [ [ "### SKA parameters and methods from arXiv:0804.4476 section 7.1", "_____no_output_____" ] ], [ [ "###############################################\n#SKA calculation using parameters and methods from arXiv:0804.4476 section 7.1\nsigma_SKA = 10*u.ns.to('s')*u.s #sigma_rms timing residuals in nanoseconds to seconds\nT_SKA = 15*u.yr #Observing time in years\nN_p_SKA = 20 #Number of pulsars\ncadence_SKA = 1/(u.wk.to('yr')*u.yr) #Avg observation cadence of 1 every week in [number/yr]", "_____no_output_____" ], [ "SKA_Hazboun = SnN.PTA('SKA_Hazboun')\nSKA_Hazboun.Default_Setup_Hazboun_2019(T_SKA,N_p_SKA,sigma_SKA,cadence_SKA)", "_____no_output_____" ], [ "SKA_Moore = SnN.PTA('SKA_Moore')\nSKA_Moore.Default_Setup_Moore_2014(T_SKA,N_p_SKA,sigma_SKA,cadence_SKA)", "_____no_output_____" ] ], [ [ "#### Using Jeff's Methods/code https://arxiv.org/abs/1907.04341", "_____no_output_____" ], [ "### NANOGrav 11.5yr parameters https://arxiv.org/abs/1801.01837", "_____no_output_____" ] ], [ [ "###############################################\n#NANOGrav calculation using 11.5yr parameters https://arxiv.org/abs/1801.01837\nsigma_nano = 100*u.ns.to('s')*u.s #rms timing residuals in nanoseconds to seconds\nT_nano = 15*u.yr #Observing time in years\nN_p_nano = 18 #Number of pulsars\ncadence_nano = 1/(2*u.wk.to('yr')*u.yr) #Avg observation cadence of 1 every 2 weeks in number/year", "_____no_output_____" ], [ "NANOGrav_Hazboun = SnN.PTA('NANOGrav Hazboun')\nNANOGrav_Hazboun.Default_Setup_Hazboun_2019(T_nano,N_p_nano,sigma_nano,cadence_nano) ", "_____no_output_____" ], [ "NANOGrav_Moore = SnN.PTA('NANOGrav Moore')\nNANOGrav_Moore.Default_Setup_Moore_2014(T_nano,N_p_nano,sigma_nano,cadence_nano) ", "_____no_output_____" ], [ "fig = plt.figure(figsize=(10,8))\nplt.loglog(NANOGrav_Hazboun.fT,NANOGrav_Hazboun.h_n_f, linewidth = linesize,label = r'NANOGrav Hazboun')\nplt.loglog(NANOGrav_Moore.fT,NANOGrav_Moore.h_n_f,linestyle = '--', linewidth = linesize,label = r'NANOGrav Moore')\n\nplt.loglog(SKA_Moore.fT,SKA_Moore.h_n_f,linestyle = '--', linewidth = linesize,label = r'SKA Moore')\nplt.loglog(SKA_Hazboun.fT,SKA_Hazboun.h_n_f, linewidth = linesize,label = r'SKA Hazboun')\n\nplt.loglog(NANOGrav_Mingarelli_GWB.fT,NANOGrav_Mingarelli_GWB.h_n_f,linestyle = ':', linewidth = linesize,\\\n label = r'Mingarelli, et al. (2017) with GWB')\nplt.loglog(NANOGrav_Mingarelli_no_GWB.fT,NANOGrav_Mingarelli_no_GWB.h_n_f,linestyle = ':', linewidth = linesize,\\\n label = r'Mingarelli, et al. 
(2017) w/o GWB')\n\nplt.tick_params(axis = 'both',which = 'major', labelsize = axissize)\nplt.ylim([5e-19,1e-12])\nplt.xlim([1e-10,1e-6])\n#plt.title('NANOGrav (15yr)',fontsize=labelsize)\nplt.xlabel(r'Frequency $[Hz]$',fontsize = labelsize)\nplt.ylabel('Characteristic Strain',fontsize = labelsize)\nplt.legend(loc='lower right', fontsize = 12)\n\n#########################\n#Save Figure to File\nfigname = '/PTA_Char_Strain.pdf'\nfigloc = fig_save_location+figname\nisitsavetime = False\nif isitsavetime:\n fig.savefig(figloc, bbox_inches='tight')\n \nplt.show()", "_____no_output_____" ] ], [ [ "####################################################################\n# Calculate LISA amplitude spectral densities for various models", "_____no_output_____" ] ], [ [ "L = 2.5*u.Gm #armlength in Gm\nL = L.to('m')\nLISA_T_obs = 4*u.yr.to('s')*u.s\nGround_T_obs = 4*u.yr.to('s')*u.s", "_____no_output_____" ] ], [ [ "### LISA Calculation from https://arxiv.org/pdf/1702.00786.pdf (Amaro-Seaone 2017)", "_____no_output_____" ] ], [ [ "f_acc_break_low = .4*u.mHz.to('Hz')*u.Hz\nf_acc_break_high = 8.*u.mHz.to('Hz')*u.Hz\nf_IMS_knee = 2.*u.mHz.to('Hz')*u.Hz\nA_acc = 3e-15*u.m/u.s/u.s\nA_IMS = 10e-12*u.m\n\nBackground = False\n\nESA_LISA = SnN.SpaceBased('ESA_LISA')\nESA_LISA.Default_Setup(LISA_T_obs,L,A_acc,f_acc_break_low,f_acc_break_high,A_IMS,f_IMS_break,Background)", "_____no_output_____" ] ], [ [ "### Neil Calculation from https://arxiv.org/pdf/1803.01944.pdf", "_____no_output_____" ] ], [ [ "#Neil Calculation from https://arxiv.org/pdf/1803.01944.pdf\nf_acc_break_low = .4*u.mHz.to('Hz')*u.Hz\nf_acc_break_high = 8.*u.mHz.to('Hz')*u.Hz\nf_IMS_knee = 2.*u.mHz.to('Hz')*u.Hz\nA_acc = 3e-15*u.m/u.s/u.s\nA_IMS = 1.5e-11*u.m\nBackground = False\n \nNeil_LISA = SnN.SpaceBased('Neil_LISA')\nNeil_LISA.Default_Setup(LISA_T_obs,L,A_acc,f_acc_break_low,f_acc_break_high,A_IMS,f_IMS_break,Background)", "_____no_output_____" ] ], [ [ "### Plots of Space-Based Detectors", "_____no_output_____" ] ], [ [ "fig = plt.figure(figsize=(10,5))\nplt.loglog(ESA_LISA.fT,ESA_LISA.h_n_f,label='ESA LISA')\nplt.loglog(Neil_LISA.fT,Neil_LISA.h_n_f,label='Neil LISA')\n\nplt.xlabel(r'Frequency $[Hz]$',fontsize = labelsize)\nplt.ylabel('Characteristic Strain',fontsize = labelsize)\nplt.legend()\n#########################\n#Save Figure to File\nfigname = '/LISA_Char_Strain.pdf'\nfigloc = fig_save_location+figname\nisitsavetime = False\nif isitsavetime:\n fig.savefig(figloc, bbox_inches='tight')\n \nplt.show()", "_____no_output_____" ] ], [ [ "#######################################################################\n# BBH strain calculation", "_____no_output_____" ] ], [ [ "#Vars = [M,q,chi1,chi2,z]\nM = [1e6,65.0,1e10]\nq = [1.0,18.0,1.0]\nx1 = [0.95,0.0,-0.95]\nx2 = [0.95,0.0,-0.95]\nz = [3.0,0.093,20.0]\ninc = 0.0 #Doesn't really work...\n\nVars1 = [M[0],q[0],x1[0],x2[0],z[0]]\nVars2 = [M[1],q[1],x1[1],x2[1],z[1]]\nVars3 = [M[2],q[2],x1[2],x2[2],z[2]]\nVars4 = [M[1],q[0],x1[1],x2[1],z[1]]", "_____no_output_____" ], [ "source_1 = SnN.BlackHoleBinary()\nsource_1.Default_Setup(M[0],q[0],x1[0],x2[0],z[0],inc,ESA_LISA)", "_____no_output_____" ], [ "source_2 = SnN.BlackHoleBinary()\nsource_2.Default_Setup(M[1],q[1],x1[1],x2[1],z[1],inc,aLIGO)", "_____no_output_____" ], [ "source_3 = SnN.BlackHoleBinary()\nsource_3.Default_Setup(M[2],q[2],x1[2],x2[2],z[2],inc,SKA)", "_____no_output_____" ], [ "source_4 = SnN.BlackHoleBinary()\nsource_4.Default_Setup(M[1],q[0],x1[1],x2[1],z[1],inc,ET)", "_____no_output_____" ], [ 
"diff0002.Default_Setup(M[1],q[0],z[1])\ndiff0114.Default_Setup(M[1],q[0],z[1])\ndiff0178.Default_Setup(M[1],q[0],z[1])\ndiff0261.Default_Setup(M[1],q[0],z[1])\ndiff0303.Default_Setup(M[1],q[0],z[1])", "_____no_output_____" ], [ "fig,ax = plt.subplots(figsize = figsize)\nplt.loglog(ET.fT,ET.h_n_f, linewidth = linesize,color = cm.hsv(colornorm(1.75)),label = 'ET')\nplt.loglog(diff0002.f,diff0002.Get_CharStrain(),label = 'diff0002')\nplt.loglog(diff0114.f,diff0114.Get_CharStrain(),label = 'diff0114')\nplt.loglog(diff0178.f,diff0178.Get_CharStrain(),label = 'diff0178')\nplt.loglog(diff0261.f,diff0261.Get_CharStrain(),label = 'diff0261')\nplt.loglog(diff0303.f,diff0303.Get_CharStrain(),label = 'diff0303')\nplt.xlabel(r'Frequency $[Hz]$',fontsize = labelsize)\nplt.ylabel('Characteristic Strain',fontsize = labelsize)\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "fig,ax = plt.subplots(figsize = figsize)\n#plt.loglog(NANOGrav_f,NANOGrav_h_f)\nax.loglog(SKA_no_GWB.fT,SKA_no_GWB.h_n_f, linewidth = linesize,color = cm.hsv(colornorm(0.0)),label = 'IPTA ~2030s')\nax.loglog(NANOGrav_approx_no_GWB.fT,NANOGrav_approx_no_GWB.h_n_f, linewidth = linesize,color = cm.hsv(colornorm(0.5)),label = 'NANOGrav (15yr)')\nax.loglog(ESA_LISA.fT,ESA_LISA.h_n_f, linewidth = linesize,color = cm.hsv(colornorm(1.75)),label = 'LISA')\nax.loglog(aLIGO.fT,aLIGO.h_n_f,color = cm.hsv(colornorm(2.8)),label = 'aLIGO')\nax.loglog(ET.fT,ET.h_n_f, linewidth = linesize,color = cm.hsv(colornorm(2.5)),label = 'Einstein Telescope')\nax.loglog(source_1.f,source_1.Get_CharStrain(), linewidth = linesize,color = cm.hsv(colornorm(0.8)),label = r'$M = %.1e$ $M_{\\odot}$, $q = %.1f$, $z = %.1f$, $\\chi_{i} = %.2f$' %(M[0],q[0],z[0],x1[0]))\nax.loglog(source_2.f,source_2.Get_CharStrain(), linewidth = linesize,color = cm.hsv(colornorm(3.0)),label = r'$M = %.1e$ $M_{\\odot}$, $q = %.1f$, $z = %.1f$, $\\chi_{i} = %.0f$' %(M[1],q[1],z[1],x1[1]))\nax.loglog(source_3.f,source_3.Get_CharStrain(), linewidth = linesize,color = cm.hsv(colornorm(4.5)),label = r'$M = %.1e$ $M_{\\odot}$, $q = %.1f$, $z = %.1f$, $\\chi_{i} = %.2f$' %(M[2],q[2],z[2],x1[2]))\n'''ax.scatter(fT[mono_idx_1],h_mono_1)\nax.scatter(ET_f[mono_idx_2],h_mono_2)\nax.scatter(NANOGrav_f[mono_idx_3],h_mono_3)'''\n\n\nax.set_xlim([8e-10, 1e4])\nax.set_ylim([1e-24, 1e-11])\nax.tick_params(axis = 'both',which = 'major', labelsize = axissize)\n\nax.set_xlabel(r'Frequency $[Hz]$',fontsize = labelsize)\nax.set_ylabel('Characteristic Strain',fontsize = labelsize)\nax.legend(loc='upper right', fontsize = legendsize)\nplt.show()", "_____no_output_____" ], [ "#########################\n#Save Figure to File\nfigname = '/Char_Strain_v1.pdf'\nfigloc = current_path+figname\nisitsavetime = False\nif isitsavetime:\n fig.savefig(figloc, bbox_inches='tight')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c52f2d81f543e76950053a428a1c4d8136872a26
738,753
ipynb
Jupyter Notebook
test/HOWTO.ipynb
Hypnus1803/pyFlowMaps
9c8e6d77701ab6cf79fcdab8b1bf5732a0d5bde3
[ "MIT" ]
1
2018-11-06T21:04:45.000Z
2018-11-06T21:04:45.000Z
test/HOWTO.ipynb
Hypnus1803/pyflowmaps
9c8e6d77701ab6cf79fcdab8b1bf5732a0d5bde3
[ "MIT" ]
null
null
null
test/HOWTO.ipynb
Hypnus1803/pyflowmaps
9c8e6d77701ab6cf79fcdab8b1bf5732a0d5bde3
[ "MIT" ]
1
2018-11-04T21:08:04.000Z
2018-11-04T21:08:04.000Z
2,629.014235
497,112
0.962189
[ [ [ "import glob, sys\nfrom IPython.display import HTML\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\nfrom astropy.io import fits\nfrom pyflowmaps.flow import flowLCT\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")", "_____no_output_____" ] ], [ [ "# Load the data\nWe include in the folder *data/* a cube fits file with the data coaligned and centered in a active region NOAA 1757.", "_____no_output_____" ] ], [ [ "cube = fits.getdata('data/cube_sunspot.fits')\nprint(cube.shape)", "(30, 128, 128)\n" ] ], [ [ "Look into one of the frames in the cube.", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(figsize=(10,10))\nim=ax.imshow(cube[15],origin='lower',cmap='gray')\nax.set_title('NOAA 1757 frame no. 15')\nax.set_xlabel('X-axis [pix]')\nax.set_ylabel('Y-axis [pix]')\nfig.colorbar(im,ax=ax,label='Intensity',shrink=0.82,aspect=15)", "_____no_output_____" ] ], [ [ "The shape of the data corresponds to 30 images with 128x128 pix dimesions per image. The frames are cut off from HMI/SDO data from 2013-01-05, intensity product, wtih a cadence of $720 s$, and the pixel size is around $\\sim 0.504$. Other parameter we need is the size of the apodization window $FWHM$ which for this example will be $3\\, arcsec$. This size depends on the size of the feature you want to study, as well as the resolution of your instrument. Other parameter that is neccesary is the average time over which the velocities will be calculated, but actually, it is included on the size of the input cube. For this example, the time over the average will be calculate is 6 hours ($30\\times720 s=21600 s=6 h$).", "_____no_output_____" ] ], [ [ "flows = flowLCT(cube, 3, 0.504, 720,method='square',interpolation='fivepoint',window='boxcar')", "_____no_output_____" ] ], [ [ "We extract the velocities", "_____no_output_____" ] ], [ [ "vx = flows.vx\nvy = flows.vy\nvz = flows.vz", "-8.673617379884035e-19 0.2813307552490982\n0.0 0.21734476653809726\n0.8955099947945748 22.00409316318381\n" ] ], [ [ "Velocities are returned in $kms^{-1}$. The velocity $v_z$ comes from\n$$\nv_z = h_m\\nabla\\cdot v_h(v_x,v_y) \n$$\nwhere $v_h$ are the horizontal velocities which depends on $v_x$ and $v_y$, whereas $h_m=150\\,km$ is the mass-flux scale-heigth [(November 1989, ApJ,344,494)](https://ui.adsabs.harvard.edu/abs/1989ApJ...344..494N/abstract). 
Some authors prefer to show the divergences instead of the $v_z$, so the user just need to divide $v_z/h_m$.\n\nNext, the users can also create colormaps and personlize them.", "_____no_output_____" ] ], [ [ "from matplotlib import cm\nfrom matplotlib.colors import ListedColormap\n\ntop = cm.get_cmap('Reds_r', 128)\nbottom = cm.get_cmap('YlGn', 128)\n\nnewcolors = np.vstack((top(np.linspace(0.3, 1, 128)),\n bottom(np.linspace(0, 0.75, 128))))\nnewcmp = ListedColormap(newcolors, name='RdYlGn')", "_____no_output_____" ] ], [ [ "Now, we will plot the flows in each horizontal direction, and the divergence.", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(1,3,figsize=(15,8),sharey=True)\nplt.subplots_adjust(wspace=0.03)\nflowx=ax[0].imshow(vx,origin='lower',cmap='RdYlGn',vmin = vx.mean()-3*vx.std(),vmax=vx.mean()+3*vx.std())\nax[0].set_title('Horizontal flowmap vx')\nax[0].set_xlabel('X-axis [pix]')\nax[0].set_ylabel('Y-axis [pix]')\n\nflowy=ax[1].imshow(vy,origin='lower',cmap='RdYlGn',vmin = vy.mean()-3*vy.std(),vmax=vy.mean()+3*vy.std())\nax[1].set_title('Horizontal flowmap vy')\nax[1].set_xlabel('X-axis [pix]')\n\ndiv = vz/150\nflowz=ax[2].imshow(div,origin='lower',cmap='RdYlGn',vmin = div.mean()-3*div.std(),vmax=div.mean()+3*div.std())\nax[2].set_title('Horizontal flowmap divergence')\nax[2].set_xlabel('X-axis [pix]')\n\nfig.colorbar(flowx,ax=ax[0],orientation='horizontal',shrink=1,label='vx [km/s]')\nfig.colorbar(flowy,ax=ax[1],orientation='horizontal',shrink=1,label='vy [km/s]')\nfig.colorbar(flowz,ax=ax[2],orientation='horizontal',shrink=1,label='divergence')\nfig.savefig('/Users/joseivan/pyflowmaps/images/flowmaps.jpg',format='jpeg',bbox_inches='tight')", "_____no_output_____" ] ], [ [ "Finally, we can also plot the arrows associated with the horizontal velocities", "_____no_output_____" ] ], [ [ "xx,yy = np.meshgrid(np.arange(128),np.arange(128)) # we create a grid\ndense = 2 # each how many pixels you want to plot arrows\nfig,ax = plt.subplots(figsize=(10,10))\nQ = ax.quiver(xx[::dense,::dense],yy[::dense,::dense],vx[::dense,::dense],vy[::dense,::dense], \n color='k', scale=8, headwidth= 4, headlength=4, width=0.0012)\nim = ax.imshow(cube[15],cmap='gray',origin='lower')\nax.set_title('Flowmap horizontal velocities overplotted')\nax.set_xlabel('X-axis [pix]')\nax.set_ylabel('Y-axis [pix]')\nfig.colorbar(im,ax=ax,label='Intensity',shrink=0.82,aspect=15)\nfig.savefig('/Users/joseivan/pyflowmaps/images/flowmaps_arrows.jpg',format='jpeg',bbox_inches='tight')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
c52f3e7b0f476d6336c135b0e696f07f9f81c86e
9,119
ipynb
Jupyter Notebook
Python/.ipynb_checkpoints/Loops and Control structures-checkpoint.ipynb
jotathebest/UDI-IoT
0d7ecc98e5f1751cdcad61fd116685070b2848ba
[ "MIT" ]
null
null
null
Python/.ipynb_checkpoints/Loops and Control structures-checkpoint.ipynb
jotathebest/UDI-IoT
0d7ecc98e5f1751cdcad61fd116685070b2848ba
[ "MIT" ]
null
null
null
Python/.ipynb_checkpoints/Loops and Control structures-checkpoint.ipynb
jotathebest/UDI-IoT
0d7ecc98e5f1751cdcad61fd116685070b2848ba
[ "MIT" ]
1
2018-12-06T00:16:57.000Z
2018-12-06T00:16:57.000Z
23.563307
207
0.484373
[ [ [ "# Loops\n\nLoops is a basic statement in any programming language. Python supports the two typical loops:\n\n- for --> Loops in a pre-defined number of iterations\n- while --> Loops until a condition is reached\n", "_____no_output_____" ], [ "# For", "_____no_output_____" ] ], [ [ "# For\n\nfor i in range(1, 20):\n print(i)", "1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n" ], [ "# iterates a list to retrieve data\n\nmy_list = [1, 2, 2, 4, 8, 16]\n\nfor i in range(0, len(my_list)):\n value = my_list[i]\n print(\"iteration number: {}, value: {}\".format(i, value))", "iteration number: 0, value: 1\niteration number: 1, value: 2\niteration number: 2, value: 2\niteration number: 3, value: 4\niteration number: 4, value: 8\niteration number: 5, value: 16\n" ], [ "# Changes all the values from a list\n\nfor i in range(0, len(my_list)):\n my_list[i] = 0\n \nprint(\"new list: {}\".format(my_list))", "new list: [0, 0, 0, 0, 0, 0]\n" ] ], [ [ "## While", "_____no_output_____" ] ], [ [ "# while\n\n# iterates a list to retrieve data\n\nmy_list = [1, 2, 2, 4, 8, 16]\n\ni = 0\n\nwhile i < len(my_list):\n value = my_list[i]\n print(\"iteration number: {}, value: {}\".format(i, value))\n i += 1\n", "iteration number: 0, value: 1\niteration number: 1, value: 2\niteration number: 2, value: 2\niteration number: 3, value: 4\niteration number: 4, value: 8\niteration number: 5, value: 16\n" ], [ "# Changes all the values from a list\n\ni = 0\nwhile i < len(my_list):\n my_list[i] = 0\n i += 1\n \nprint(\"new list: {}\".format(my_list))", "new list: [0, 0, 0, 0, 0, 0]\n" ] ], [ [ "# Conditionals and control structures\n\nOne of the most important topics to any programmer, control structures allows your code to take decisions based on pre-set conditions. 
Python supports the below control and conditionals definitions:\n\n- if --> Evaluates the value of a variable\n- is --> Complements any if condition, _is_ is the conector to the conditional to evaluate a variable's value\n- else --> Complements any if condition, the else statement is executed if the previous _if_ does not reach the condition\n- Logic operators: <, >, <=, >=, ==, !=\n- in --> Mostly used when working with lists, allows to the programmer to know if a value is present inside a given list or tuple.", "_____no_output_____" ] ], [ [ "text = \"this is a text\"\nif text == \"this is a text\":\n print(\"text matches!\")", "text matchsh\n" ], [ "text = \"this is a text\"\nif text == \"this text does not match\":\n print(\"text matches!\")\nelse:\n print(\"texts are different\")", "texts are different\n" ], [ "# Verifies if a number is between the range 1 < x < 5\n\ndef is_in_range(my_num):\n if my_num > 1 and my_num < 5:\n print(\"number {} is in range\".format(my_num))\n else:\n print(\"number {} is not in range\".format(my_num))\n\n# Tests\n\nis_in_range(10)\nis_in_range(2)\nis_in_range(-1)\n ", "number 10 is not in range\nnumber 2 is in range\nnumber -1 is not in range\n" ], [ "# iterates a list and prints values only if index is odd\n\nmy_list = [1, 2, 2, 4, 8, 16]\n\nfor i in range(0, len(my_list)):\n value = my_list[i]\n if i%2 > 0:\n print(\"iteration number: {}, value: {}\".format(i, value))", "iteration number: 1, value: 2\niteration number: 3, value: 4\niteration number: 5, value: 16\n" ], [ "# verifies if a number is in a list\n\nmy_list = [1, 2, 3, 4, 5]\n\ndef is_in_list(my_num):\n if my_num in my_list:\n print(\"number {} is inside the list\".format(my_num))\n else:\n print(\"number {} is not inside the list\".format(my_num))\n\n# Tests\n\nis_in_list(5)\nis_in_list(0)", "number 5 is inside the list\nnumber 0 is not inside the list\n" ] ], [ [ "## Exercises\n\n1. Write a Python function that takes two lists and returns True if they have at least one common\n2. Write a Python program to print a specified list after removing the 0th, 4th and 5th elements.\n\n ```\n Sample List : ['Red', 'Green', 'White', 'Black', 'Pink', 'Yellow']\n Expected Output : ['Green', 'White', 'Black']\n ```\n3. Write a Python program to print the numbers of a specified list after removing even numbers\n4. **Challenge** Write a Python program to find those numbers which are divisible by 7 and multiple of 5, between 1500 and 2700 (both included)\n5. **Challenge** Write a Python program to construct the following pattern, using a nested for loop.\n\no \no o \no o o \no o o o \no o o o o \no o o o \no o o \no o \no\n\n6. Write a Python program which takes two digits m (row) and n (column) as input and generates a two-dimensional array. The element value in the i-th row and j-th column of the array should be i*j. \n\n ```\n Note :\n i = 0,1.., m-1\n j = 0,1, n-1.\n\n Test Data : Rows = 3, Columns = 4\n Expected Result : [[0, 0, 0, 0], [0, 1, 2, 3], [0, 2, 4, 6]]\n ```\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ] ]
c52f4c12ecf0035e0aafdca4580f10555e789cf6
23,976
ipynb
Jupyter Notebook
5.5 Hypothesis Testing Fundamentals/Significance & Hypothesis Testing.ipynb
orspain/Dataquest
8d6817e89afd7e88397a76c2a638aa5aca77d99f
[ "Apache-2.0" ]
9
2020-04-16T17:23:53.000Z
2021-09-25T13:59:10.000Z
5.5 Hypothesis Testing Fundamentals/Significance & Hypothesis Testing.ipynb
orspain/Dataquest
8d6817e89afd7e88397a76c2a638aa5aca77d99f
[ "Apache-2.0" ]
null
null
null
5.5 Hypothesis Testing Fundamentals/Significance & Hypothesis Testing.ipynb
orspain/Dataquest
8d6817e89afd7e88397a76c2a638aa5aca77d99f
[ "Apache-2.0" ]
16
2019-12-15T19:52:27.000Z
2022-02-28T19:22:15.000Z
73.772308
5,996
0.768393
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# TODO Read in weight_loss.csv\n# Assign variables to columns", "_____no_output_____" ], [ "mean_group_a = np.mean(weight_lost_a)\nmean_group_b = np.mean(weight_lost_b)\n\nplt.hist(weight_lost_a)\nplt.show()\nplt.hist(weight_lost_b)\nplt.show()", "_____no_output_____" ], [ "mean_difference = mean_group_b - mean_group_a\nprint(mean_difference)", "_____no_output_____" ], [ "mean_difference = 2.52\nprint(all_values)\nmean_differences = []\nfor i in range(1000):\n group_a = []\n group_b = []\n for value in all_values:\n assignment_chance = np.random.rand()\n if assignment_chance >= 0.5:\n group_a.append(value)\n else:\n group_b.append(value)\n iteration_mean_difference = np.mean(group_b) - np.mean(group_a)\n mean_differences.append(iteration_mean_difference)\n \nplt.hist(mean_differences)\nplt.show()", "_____no_output_____" ], [ "sampling_distribution = {}\nfor df in mean_differences:\n if sampling_distribution.get(df, False):\n sampling_distribution[df] = sampling_distribution[df] + 1\n else:\n sampling_distribution[df] = 1", "_____no_output_____" ], [ "frequencies = []\nfor sp in sampling_distribution.keys():\n if sp >= 2.52:\n frequencies.append(sampling_distribution[sp])\np_value = np.sum(frequencies) / 1000", "_____no_output_____" ] ], [ [ "Chi-squared tests - creating distribution", "_____no_output_____" ] ], [ [ "chi_squared_values = []\nfrom numpy.random import random\nimport matplotlib.pyplot as plt\n\nfor i in range(1000):\n sequence = random((32561,))\n sequence[sequence < .5] = 0\n sequence[sequence >= .5] = 1\n male_count = len(sequence[sequence == 0])\n female_count = len(sequence[sequence == 1])\n male_diff = (male_count - 16280.5) ** 2 / 16280.5\n female_diff = (female_count - 16280.5) ** 2 / 16280.5\n chi_squared = male_diff + female_diff\n chi_squared_values.append(chi_squared)\n\nplt.hist(chi_squared_values)", "_____no_output_____" ], [ "chi_squared_values = []\nfrom numpy.random import random\nimport matplotlib.pyplot as plt\n\n# loop 1000 times\nfor i in range(1000):\n # numpy random generating 300 numbers between 0.0 and 1.0.\n # get a vector with 300 elements.\n sequence = random((300,))\n # \n # if it is less than .5, replace it with 0 \n sequence[sequence < .5] = 0\n \n # otherwise replace it with 1\n sequence[sequence >= .5] = 1\n \n # Compute the male_diff by subtracting the expected Male count (150) \n # from the observed Male count, squaring it, \n #and dividing by the expected Male count. 
Do the same for female_diff\n male_count = len(sequence[sequence == 0])\n female_count = len(sequence[sequence == 1])\n male_diff = (male_count - 150) ** 2 / 150\n female_diff = (female_count - 150) ** 2 / 150\n \n # find the chi squared\n chi_squared = male_diff + female_diff\n # append the values\n chi_squared_values.append(chi_squared)\n\nplt.hist(chi_squared_values)", "_____no_output_____" ], [ "diffs = []\nobserved = [27816, 3124, 1039, 311, 271]\nexpected = [26146.5, 3939.9, 944.3, 260.5, 1269.8]\n\nfor i, obs in enumerate(observed):\n exp = expected[i]\n diff = (obs - exp) ** 2 / exp\n diffs.append(diff)\n \nrace_chisq = sum(diffs)", "_____no_output_____" ], [ "from scipy.stats import chisquare\n\nobserved = np.array([27816, 3124, 1039, 311, 271])\nexpected = np.array([26146.5, 3939.9, 944.3, 260.5, 1269.8])\n\nchisquare_value, race_pvalue = chisquare(observed, expected)", "_____no_output_____" ], [ "table = pd.crosstab(income[\"sex\"], [income[\"race\"]])\nprint(table)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
c52f5148af0d945d512bf41a24b8e345839a9350
8,495
ipynb
Jupyter Notebook
Python Advance Programming/Advance Programming Assignment 9.ipynb
Sayan97/Python
13b28a9187bc8ad95948f89081bea8603197c791
[ "MIT" ]
null
null
null
Python Advance Programming/Advance Programming Assignment 9.ipynb
Sayan97/Python
13b28a9187bc8ad95948f89081bea8603197c791
[ "MIT" ]
null
null
null
Python Advance Programming/Advance Programming Assignment 9.ipynb
Sayan97/Python
13b28a9187bc8ad95948f89081bea8603197c791
[ "MIT" ]
null
null
null
32.547893
336
0.524308
[ [ [ "# Assignment 09 Solutions", "_____no_output_____" ], [ "#### 1. YouTube offers different playback speed options for users. This allows users to increase or decrease the speed of the video content. Given the actual duration and playback speed of the video, calculate the playback duration of the video.\n**Examples:** \n`playback_duration(\"00:30:00\", 2) ➞ \"00:15:00\"` \n`playback_duration(\"01:20:00\", 1.5) ➞ \"00:53:20\"` \n`playback_duration(\"51:20:09\", 0.5) ➞ \"102:40:18\"` ", "_____no_output_____" ] ], [ [ "def playback_duration(in_time,playback_speed):\n time = in_time.split(\":\")\n time_in_secs = (3600*int(time[0])+60*int(time[1])+int(time[2]))/playback_speed\n f_time_in_hours = str(int(time_in_secs/3600)) if time_in_secs > 3600 else '00'\n f_time_in_mins = str(int((time_in_secs%3600)/60)) if (time_in_secs)%3600 > 60 else '00'\n f_time_in_secs = str(int((time_in_secs%3600)%60)) if ((time_in_secs)%3600)%60 > 0 else '00' \n output = f'{f_time_in_hours}:{f_time_in_mins}:{f_time_in_secs}'\n print(f'playback_duration{in_time, playback_speed} ➞ {output}')\n \nplayback_duration(\"00:30:00\", 2)\nplayback_duration(\"01:20:00\", 1.5)\nplayback_duration(\"51:20:09\", 0.5) ", "playback_duration('00:30:00', 2) ➞ 00:15:00\nplayback_duration('01:20:00', 1.5) ➞ 00:53:20\nplayback_duration('51:20:09', 0.5) ➞ 102:40:18\n" ] ], [ [ "#### 2. We needs your help to construct a building which will be a pile of n cubes. The cube at the bottom will have a volume of n^3, the cube above will have volume of (n-1)^3 and so on until the top which will have a volume of 1^3.\nGiven the total volume m of the building, can you find the number of cubes n required for the building? \nIn other words, you have to return an integer n such that: `n^3 + (n-1)^3 + ... + 1^3 == m` \nReturn None if there is no such number. \n**Examples:** \n`pile_of_cubes(1071225) ➞ 45` \n`pile_of_cubes(4183059834009) ➞ 2022` \n`pile_of_cubes(16) ➞ None` ", "_____no_output_____" ] ], [ [ "def pile_of_cubes(in_volume):\n out_volume = 0\n output = 0\n for cube in range(1,in_volume):\n out_volume += pow(cube,3)\n if in_volume <= out_volume:\n output = cube if in_volume == out_volume else None\n break\n print(f'pile_of_cubes({in_volume}) ➞ {output}')\n\npile_of_cubes(1071225)\npile_of_cubes(4183059834009)\npile_of_cubes(16)", "pile_of_cubes(1071225) ➞ 45\npile_of_cubes(4183059834009) ➞ 2022\npile_of_cubes(16) ➞ None\n" ] ], [ [ "#### 3. A fulcrum of a list is an integer such that all elements to the left of it and all elements to the right of it sum to the same value. 
Write a function that finds the fulcrum of a list.\n**To illustrate:** \n`find_fulcrum([3, 1, 5, 2, 4, 6, -1]) ➞ 2` // Since [3, 1, 5] and [4, 6, -1] both sum to 9 \n**Examples:** \n`find_fulcrum([1, 2, 4, 9, 10, -10, -9, 3]) ➞ 4` \n`find_fulcrum([9, 1, 9]) ➞ 1` \n`find_fulcrum([7, -1, 0, -1, 1, 1, 2, 3]) ➞ 0` \n`find_fulcrum([8, 8, 8, 8]) ➞ -1` ", "_____no_output_____" ] ], [ [ "def find_fulcrum(in_list):\n output = -1\n for ele in in_list:\n index_of_ele =in_list.index(ele)\n if sum(in_list[:index_of_ele]) == sum(in_list[index_of_ele+1:]):\n output = ele\n break\n print(f'find_fulcrum({in_list}) ➞ {output}')\n \nfind_fulcrum([3, 1, 5, 2, 4, 6, -1])\nfind_fulcrum([1, 2, 4, 9, 10, -10, -9, 3])\nfind_fulcrum([9, 1, 9])\nfind_fulcrum([7, -1, 0, -1, 1, 1, 2, 3])\nfind_fulcrum([8, 8, 8, 8])", "find_fulcrum([3, 1, 5, 2, 4, 6, -1]) ➞ 2\nfind_fulcrum([1, 2, 4, 9, 10, -10, -9, 3]) ➞ 4\nfind_fulcrum([9, 1, 9]) ➞ 1\nfind_fulcrum([7, -1, 0, -1, 1, 1, 2, 3]) ➞ 0\nfind_fulcrum([8, 8, 8, 8]) ➞ -1\n" ] ], [ [ "#### 4. Given a list of integers representing the color of each sock, determine how many pairs of socks with matching colors there are. For example, there are 7 socks with colors [1, 2, 1, 2, 1, 3, 2]. There is one pair of color 1 and one of color 2. There are three odd socks left, one of each color. The number of pairs is 2.\nCreate a function that returns an integer representing the number of matching pairs of socks that are available. \n**Examples:** \n`sock_merchant([10, 20, 20, 10, 10, 30, 50, 10, 20]) ➞ 3` \n`sock_merchant([50, 20, 30, 90, 30, 20, 50, 20, 90]) ➞ 4` \n`sock_merchant([]) ➞ 0` ", "_____no_output_____" ] ], [ [ "def sock_merchant(in_list):\n paired_socks = {}\n output = 0\n for ele in in_list:\n if ele in paired_socks:\n paired_socks[ele]+=1\n else:\n paired_socks[ele]=1\n for pair in paired_socks.values():\n output += pair//2\n print(f'sock_merchant({in_list}) ➞ {output}')\n \n\nsock_merchant([10, 20, 20, 10, 10, 30, 50, 10, 20])\nsock_merchant([50, 20, 30, 90, 30, 20, 50, 20, 90])\nsock_merchant([])", "sock_merchant([10, 20, 20, 10, 10, 30, 50, 10, 20]) ➞ 3\nsock_merchant([50, 20, 30, 90, 30, 20, 50, 20, 90]) ➞ 4\nsock_merchant([]) ➞ 0\n" ] ], [ [ "#### 5. Create a function that takes a string containing integers as well as other characters and return the sum of the negative integers only.\n**Examples:** \n`negative_sum(\"-12 13%14&-11\") ➞ -23` \n`# -12 + -11 = -23` \n`negative_sum(\"22 13%14&-11-22 13 12\") ➞ -33` \n`# -11 + -22 = -33` ", "_____no_output_____" ] ], [ [ "import re\ndef negative_sum(in_string):\n pattern = '-\\d+'\n output = sum([int(ele) for ele in re.findall(pattern,in_string)])\n print(f'negative_sum(\"{in_string}\") ➞ {output}')\n \nnegative_sum(\"-12 13%14&-11\")\nnegative_sum(\"22 13%14&-11-22 13 12\")", "negative_sum(\"-12 13%14&-11\") ➞ -23\nnegative_sum(\"22 13%14&-11-22 13 12\") ➞ -33\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
c52f5af463267a881ffc0b1938e95a730a7a0336
72,884
ipynb
Jupyter Notebook
iLearn/py/practice/1-basic/notes/4.Functions-Modules.ipynb
chisomloius/iLearnPy-
e00a622cc22bdc90e7a849d2705e0ad41ff0b461
[ "MIT" ]
null
null
null
iLearn/py/practice/1-basic/notes/4.Functions-Modules.ipynb
chisomloius/iLearnPy-
e00a622cc22bdc90e7a849d2705e0ad41ff0b461
[ "MIT" ]
null
null
null
iLearn/py/practice/1-basic/notes/4.Functions-Modules.ipynb
chisomloius/iLearnPy-
e00a622cc22bdc90e7a849d2705e0ad41ff0b461
[ "MIT" ]
null
null
null
24.917607
688
0.543837
[ [ [ "![logo.png](image/logo.png)\n# Functions & Modules\n\n### You can access this notebook on: \n[colab](https://colab.com/py/), [github](https://github.com/chisomloius/iLearnPy/), [kaggle](https://kaggle.com/chisomloius/ilearnPy/), [medium](https://medium.com/@chisomloius/ilearnPy/), [web](https://chisomloius.github.io), [zindi](https://zindi.com/@chisomloius/ilearnPy/)", "_____no_output_____" ], [ "# Table of Contents\n\n### Click on the links to go directly to specific sections on the notebook.\n\n\n1. [Import Dependencies](#dependencies)\n<br>\n2. [File Handling: Read /Write/Delete Functions](#read-write)\n<br>\n3. [Functions: Definition, Arguments, Scopes](#functions)\n<br>\n4. [Functions: Concatenation, Lambdas, Iterators, Generators](#lambdas)\n<br>\n5. [Error Handling: Try and Except Handling, Error blocking and Error Tracing](#errors)\n<br>\n6. [Assignment Link](#assignment-link)\n<br>\n7. [After Thoughts](#after-thoughts)\n<br> \n8. [About Author](#about)\n<br> \n9. [More Info](#more-info)\n<br>\n<p>Estimated time needed: <strong>50 min</strong></p>\n\n----\n", "_____no_output_____" ], [ "### Python - Let's get you writing some Python Code now!</h1>\n<p><strong>Welcome!</strong> This notebook will teach you the basics of the Python programming language. Although the information presented here is quite basic, it is an important foundation that will help you read and write Python code. By the end of this notebook, you'll know the basics of Python, including how to write basic commands, understand some basic types, and how to perform simple operations on them.</p> ", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n<a id='dependencies'></a>\n \n### Import Dependencies \n</div>", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n<a id='read-write'></a>\n \n### File Handling: Read /Write/Delete Functions \n</div>", "_____no_output_____" ], [ "<p><strong>Welcome!</strong> This notebook will teach you about read and write the text to file in the Python Programming Language. By the end of this lab, you'll know how to write to file and copy the file.</p>", "_____no_output_____" ], [ "#### Reading Text Files", "_____no_output_____" ], [ "One way to read or write a file in Python is to use the built-in <code>open</code> function. The <code>open</code> function provides a <b>File object</b> that contains the methods and attributes you need in order to read, save, and manipulate the file. In this notebook, we will only cover <b>.txt</b> files. The first parameter you need is the file path and the file name. An example is shown as follow:", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Images/ReadOpen.png\" width=\"500\" />", "_____no_output_____" ], [ " The mode argument is optional and the default value is <b>r</b>. In this notebook we only cover two modes: \n<ul>\n <li><b>r</b> Read mode for reading files </li>\n <li><b>w</b> Write mode for writing files</li>\n</ul>", "_____no_output_____" ], [ "For the next example, we will use the text file <b>Example1.txt</b>. 
The file is shown as follow:", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Images/ReadFile.png\" width=\"200\" />", "_____no_output_____" ], [ " We read the file: ", "_____no_output_____" ] ], [ [ "# assigned_name1 = \"filepath\"\n# assigned_name2 = open(assigned_name, 'r')", "_____no_output_____" ], [ "# Read the Example1.txt\nexample1 = r\"..\\data\\Example_1.txt\"\n\nfile1 = open(example1, \"r\")\nfile1", "_____no_output_____" ] ], [ [ " We can view the attributes of the file.", "_____no_output_____" ], [ "The name of the file:", "_____no_output_____" ] ], [ [ "# Print the path of file\nfile1.name", "_____no_output_____" ] ], [ [ " The mode the file object is in:", "_____no_output_____" ] ], [ [ "# Print the mode of file, either 'r' or 'w'\nfile1.mode", "_____no_output_____" ] ], [ [ "We can read the file and assign it to a variable :", "_____no_output_____" ] ], [ [ "# Read the file\nFileContent = file1.read()\nFileContent", "_____no_output_____" ] ], [ [ "The <b>/n</b> means that there is a new line. ", "_____no_output_____" ], [ "We can print the file: ", "_____no_output_____" ] ], [ [ "# Print the file with '\\n' as a new line\nprint(FileContent)", "This is line A\nThis is line B\nThis is line C\nThis is line D\n\n" ] ], [ [ "The file is of type string:", "_____no_output_____" ] ], [ [ "# Type of file content\ntype(FileContent)", "_____no_output_____" ] ], [ [ " We must close the file object:", "_____no_output_____" ] ], [ [ "# Close file after finish\nfile1.close()", "_____no_output_____" ] ], [ [ "##### A Better Way to Open a File", "_____no_output_____" ], [ "Using the <code>with</code> statement is better practice, it automatically closes the file even if the code encounters an exception. The code will run everything in the indent block then close the file object. ", "_____no_output_____" ] ], [ [ "# Open file using with\nwith open(example1, \"r\") as f:\n Content = f.read()\n print(Content)", "This is line A\nThis is line B\nThis is line C\nThis is line D\n\n" ] ], [ [ "The file object is closed, you can verify it by running the following cell: ", "_____no_output_____" ] ], [ [ "# Verify if the file is closed\nf.closed", "_____no_output_____" ] ], [ [ " We can see the info in the file:", "_____no_output_____" ] ], [ [ "# See the content of file\nprint(Content)", "This is line A\nThis is line B\nThis is line C\nThis is line D\n\n" ] ], [ [ "The syntax is a little confusing as the file object is after the <code>as</code> statement. We also don’t explicitly close the file. Therefore we summarize the steps in a figure:", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Images/ReadWith.png\" width=\"500\" />", "_____no_output_____" ], [ "We don’t have to read the entire file, for example, we can read the first 4 characters by entering three as a parameter to the method **.read()**:\n", "_____no_output_____" ] ], [ [ "# Read first four characters\nwith open(example1, \"r\") as f:\n print(f.read(4))", "This\n" ] ], [ [ "Once the method <code>.read(4)</code> is called the first 4 characters are called. If we call the method again, the next 4 characters are called. 
The output for the following cell will demonstrate the process for different inputs to the method <code>read()</code>:", "_____no_output_____" ] ], [ [ "# Read certain amount of characters\nwith open(example1, \"r\") as f:\n print(f.read(2))\n print(f.read(4))\n print(f.read(7))\n print(f.read(15))", "Th\nis i\ns line \nA\nThis is line \n" ] ], [ [ "The process is illustrated in the below figure, and each color represents the part of the file read after the method <code>read()</code> is called:", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Images/ReadChar.png\" width=\"500\" />", "_____no_output_____" ], [ " Here is an example using the same file, but instead we read 16, 5, and then 9 characters at a time: ", "_____no_output_____" ] ], [ [ "# Read certain amount of characters\nwith open(example1, \"r\") as f:\n print(f.read(16))\n print(f.read(5))\n print(f.read(9))", "This is line A\nT\nhis i\ns line B\n\n" ] ], [ [ "We can also read one line of the file at a time using the method <code>readline()</code>: ", "_____no_output_____" ] ], [ [ "# Read one line\nwith open(example1, \"r\") as f:\n print(\"first line: \" + f.readline())", "first line: This is line A\n\n" ] ], [ [ " We can use a loop to iterate through each line: \n", "_____no_output_____" ] ], [ [ "# Iterate through the lines\nwith open(example1,\"r\") as f:\n i = 0;\n for line in f:\n print(\"Iteration\", str(i), \": \", line)\n i = i + 1;", "Iteration 0 : This is line A\n\nIteration 1 : This is line B\n\nIteration 2 : This is line C\n\nIteration 3 : This is line D\n\n" ] ], [ [ "We can use the method <code>readlines()</code> to save the text file to a list: ", "_____no_output_____" ] ], [ [ "# Read all lines and save as a list\nwith open(example1, \"r\") as f:\n FileasList = f.readlines()", "_____no_output_____" ] ], [ [ " Each element of the list corresponds to a line of text:", "_____no_output_____" ] ], [ [ "# Print the first line\nFileasList[0]", "_____no_output_____" ], [ "# Print the second line\n\nFileasList[1]", "_____no_output_____" ], [ "# Print the third line\n\nFileasList[2]", "_____no_output_____" ] ], [ [ "#### Writing Files", "_____no_output_____" ], [ " We can open a file object using the method <code>write()</code> to save the text file to a list. To write the mode, argument must be set to write <b>w</b>. Let’s write a file <b>Example2.txt</b> with the line: <b>“This is line A”</b>", "_____no_output_____" ] ], [ [ "# Write line to file\npath = r'..\\data\\Example_2.txt'\n\nwith open(path, 'w') as w:\n w.write(\"This is line A\")", "_____no_output_____" ] ], [ [ " We can read the file to see if it worked:", "_____no_output_____" ] ], [ [ "# Read file\nwith open(path, 'r') as t:\n print(t.read())", "This is line A\n" ] ], [ [ "We can write multiple lines:", "_____no_output_____" ] ], [ [ "# Write lines to file\n\nwith open(r'..\\data\\Example2.txt', 'w') as w:\n w.write(\"This is line A\\n\")\n w.write(\"This is line B\\n\")", "_____no_output_____" ] ], [ [ "The method <code>.write()</code> works similar to the method <code>.readline()</code>, except instead of reading a new line it writes a new line. 
The process is illustrated in the figure , the different colour coding of the grid represents a new line added to the file after each method call.", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Images/WriteLine.png\" width=\"500\" />", "_____no_output_____" ], [ "You can check the file to see if your results are correct ", "_____no_output_____" ] ], [ [ "# Check whether write to file\n\nwith open(r'..\\data\\Example_2.txt', 'r') as t:\n print(t.read())", "This is line A\n" ] ], [ [ " By setting the mode argument to append **a** you can append a new line as follows:", "_____no_output_____" ] ], [ [ "# Write a new line to text file\n\nwith open(r'..\\data\\Example_2.txt', 'a') as t:\n t.write(\"This is line C\\n\")\n t.write(\"This is line D\\n\")", "_____no_output_____" ] ], [ [ " You can verify the file has changed by running the following cell:", "_____no_output_____" ] ], [ [ "# Verify if the new line is in the text file\n\nwith open(r'..\\data\\Example_2.txt', 'r') as f:\n print(f.read())", "_____no_output_____" ] ], [ [ " We write a list to a <b>.txt</b> file as follows:", "_____no_output_____" ] ], [ [ "# Sample list of text\nLines = [\"This is line A\\n\", \"This is line B\\n\", \"This is line C\\n\"]\nLines", "_____no_output_____" ], [ "# Write the strings in the list to text file\n\nwith open('Example2.txt', 'w') as f:\n for line in Lines:\n print(line)\n f.write(line)", "_____no_output_____" ] ], [ [ " We can verify the file is written by reading it and printing out the values: ", "_____no_output_____" ] ], [ [ "# Verify if writing to file is successfully executed\n\nwith open('Example_2.txt', 'r') as f:\n print(f.read())", "_____no_output_____" ] ], [ [ "We can again append to the file by changing the second parameter to <b>a</b>. This adds the code:", "_____no_output_____" ] ], [ [ "# Append the line to the file\n\nwith open('Example_2.txt', 'a') as t:\n t.write(\"This is line D\\n\")", "_____no_output_____" ] ], [ [ "We can see the results of appending the file: ", "_____no_output_____" ] ], [ [ "# Verify if the appending is successfully executed\n\nwith open('Example2.txt', 'r') as t:\n print(t.read())", "_____no_output_____" ] ], [ [ "#### Copy a File", "_____no_output_____" ], [ "Let's copy the file <b>Example2.txt</b> to the file <b>Example3.txt</b>:", "_____no_output_____" ] ], [ [ "# Copy file to another\nwith open('Example2.txt','r') as r:\n with open('Example3.txt','w') as w:\n for line in r:\n w.write(line)", "_____no_output_____" ] ], [ [ "We can read the file to see if everything works:", "_____no_output_____" ] ], [ [ "# Verify if the copy is successfully executed\n\nwith open('Example3.txt','r') as t:\n print(t.read())", "_____no_output_____" ], [ "# add an extra line to example3.txt\nwith open('Example3.txt', 'a') as t:\n t.write('This is line E \\n')", "_____no_output_____" ], [ "# confirm that the extra line has been added\nwith open('Example3.txt', 'r') as t:\n print(t.read())", "_____no_output_____" ] ], [ [ " After reading files, we can also write data into files and save them in different file formats like **.txt, .csv, .xls (for excel files) etc**. 
Let's take a look at some examples.", "_____no_output_____" ], [ "Now go to the directory to ensure the <b>.txt</b> file exists and contains the summary data that we wrote.", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n<a id='functions'></a>\n \n### Functions: Definition, Arguments, Scopes\n</div>", "_____no_output_____" ], [ "#### Functions in Python", "_____no_output_____" ], [ "<p><strong>Welcome!</strong> This notebook will teach you about the functions in the Python Programming Language. By the end of this lab, you'll know the basic concepts about function, variables, and how to use functions.</p>", "_____no_output_____" ], [ "A function is a reusable block of code which performs operations specified in the function. They let you break down tasks and allow you to reuse your code in different programs.\n\nThere are two types of functions :\n\n- <b>Pre-defined functions</b>\n- <b>User defined functions</b>", "_____no_output_____" ], [ "<h3 id=\"content\">What is a Function?</h3>", "_____no_output_____" ], [ "You can define functions to provide the required functionality. Here are simple rules to define a function in Python:\n- Functions blocks begin <code>def</code> followed by the function <code>name</code> and parentheses <code>()</code>.\n- There are input parameters or arguments that should be placed within these parentheses. \n- You can also define parameters inside these parentheses.\n- There is a body within every function that starts with a colon (<code>:</code>) and is indented.\n- You can also place documentation before the body \n- The statement <code>return</code> exits a function, optionally passing back a value \n\nAn example of a function that adds on to the parameter <code>a</code> prints and returns the output as <code>b</code>:", "_____no_output_____" ] ], [ [ "# First function example: Add 1 to a and store as b\n\ndef add(a):\n \"\"\"\n this function add a value \n to argument a\n \"\"\"\n b = a + 1\n print(a, \"if you add one\", b)\n return b", "_____no_output_____" ] ], [ [ "The figure below illustrates the terminology: ", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%203/Images/FuncsDefinition.png\" width=\"500\" /> ", "_____no_output_____" ], [ "We can obtain help about a function :", "_____no_output_____" ] ], [ [ "# Get a help on add function\n\nhelp(add)", "Help on function add in module __main__:\n\nadd(a)\n this function add a value to argument a\n\n" ] ], [ [ "We can call the function:", "_____no_output_____" ] ], [ [ "# Call the function add()\n\nadd(16)", "16 if you add one 17\n" ] ], [ [ "If we call the function with a new input we get a new result:", "_____no_output_____" ] ], [ [ "# Call the function add()\n\nadd(2)", "2 if you add one 3\n" ] ], [ [ "We can create different functions. For example, we can create a function that multiplies two numbers. The numbers will be represented by the variables <code>a</code> and <code>b</code>:", "_____no_output_____" ] ], [ [ "# Define a function for multiple two numbers\n\ndef Mult(a, b):\n c = a * b\n return(c)", "_____no_output_____" ] ], [ [ "The same function can be used for different data types. 
For example, we can multiply two integers:\n", "_____no_output_____" ] ], [ [ "# Use mult() multiply two integers\n\nMult(2, 3)", "_____no_output_____" ] ], [ [ " Two Floats: ", "_____no_output_____" ] ], [ [ "# Use mult() multiply two floats\n\nMult(10.0, 3.14)", "_____no_output_____" ] ], [ [ "We can even replicate a string by multiplying with an integer: ", "_____no_output_____" ] ], [ [ "# Use mult() multiply two different type values together\n\nMult(2, \"Michael Jackson \")", "_____no_output_____" ] ], [ [ "#### Variables", "_____no_output_____" ], [ "The input to a function is called a formal parameter.\n\nA variable that is declared inside a function is called a local variable. The parameter only exists within the function (i.e. the point where the function starts and stops). \n\nA variable that is declared outside a function definition is a global variable, and its value is accessible and modifiable throughout the program. We will discuss more about global variables at the end of the lab.\n", "_____no_output_____" ] ], [ [ "# Function Definition\n\ndef square(a):\n \n # Local variable b\n b = 1\n c = a * a + b\n print(a, \"if you square a + 1\", c) \n return(c)\n\n\nsquare(3)", "3 if you square a + 1 10\n" ] ], [ [ "The labels are displayed in the figure: ", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%203/Images/FuncsVar.png\" width=\"500\" />", "_____no_output_____" ], [ "We can call the function with an input of <b>3</b>:", "_____no_output_____" ] ], [ [ "# Initializes Global variable \nx = 3\n# Makes function call and return function a y\ny = square(x)\ny", "3 if you square a + 1 10\n" ] ], [ [ " We can call the function with an input of <b>2</b> in a different manner:", "_____no_output_____" ] ], [ [ "# Directly enter a number as parameter\n\nsquare(2)", "2 if you square a + 1 5\n" ] ], [ [ "If there is no <code>return</code> statement, the function returns <code>None</code>. The following two functions are equivalent:", "_____no_output_____" ] ], [ [ "# Define functions, one with return value None and other without return value\n\ndef MJ():\n print('Michael Jackson')\n \ndef MJ1():\n print('Michael Jackson')\n return(None)", "_____no_output_____" ], [ "# See the output\n\nMJ()", "Michael Jackson\n" ], [ "# See the output\n\nMJ1()", "Michael Jackson\n" ] ], [ [ "Printing the function after a call reveals a **None** is the default return statement:", "_____no_output_____" ] ], [ [ "# See what functions returns are\n\nprint(MJ())\nprint(MJ1())", "Michael Jackson\nNone\nMichael Jackson\nNone\n" ] ], [ [ "Create a function <code>con</code> that concatenates two strings using the addition operation:", "_____no_output_____" ] ], [ [ "# Define the function for combining strings\n\ndef con(a, b):\n return(a + b)", "_____no_output_____" ], [ "# Test on the con() function\n\ncon(\"This \", \"is \")", "_____no_output_____" ] ], [ [ "<hr/>\n <div class=\"alert alert-success alertsuccess\" style=\"margin-top: 20px\">\n <h4> [Tip] How do I learn more about the pre-defined functions in Python? </h4>\n <p>We will be introducing a variety of pre-defined functions to you as you learn more about Python. There are just too many functions, so there's no way we can teach them all in one sitting. 
But if you'd like to take a quick peek, here's a short reference card for some of the commonly-used pre-defined functions: <a href=\"http://www.astro.up.pt/~sousasag/Python_For_Astronomers/Python_qr.pdf\">Reference</a></p>\n </div>\n<hr/>", "_____no_output_____" ], [ "#### Functions Make Things Simple", "_____no_output_____" ], [ "Consider the two lines of code in <b>Block 1</b> and <b>Block 2</b>: the procedure for each block is identical. The only thing that is different is the variable names and values.", "_____no_output_____" ], [ "<h4>Block 1:</h4>", "_____no_output_____" ] ], [ [ "# a and b calculation block1\n\na1 = 4\nb1 = 5\nc1 = a1 + b1 + 2 * a1 * b1 - 1\nif(c1 < 0):\n c1 = 0 \nelse:\n c1 = 5\nc1 ", "_____no_output_____" ] ], [ [ "<h4>Block 2:</h4>", "_____no_output_____" ] ], [ [ "# a and b calculation block2\n\na2 = 0\nb2 = 0\nc2 = a2 + b2 + 2 * a2 * b2 - 1\nif(c2 < 0):\n c2 = 0 \nelse:\n c2 = 5\nc2 ", "_____no_output_____" ] ], [ [ "We can replace the lines of code with a function. A function combines many instructions into a single line of code. Once a function is defined, it can be used repeatedly. You can invoke the same function many times in your program. You can save your function and use it in another program or use someone else’s function. The lines of code in code <b>Block 1</b> and code <b>Block 2</b> can be replaced by the following function: ", "_____no_output_____" ] ], [ [ "# Make a Function for the calculation above\n\ndef Equation(a,b):\n c = a + b + 2 * a * b - 1\n if(c < 0):\n c = 0 \n else:\n c = 5\n return(c) ", "_____no_output_____" ] ], [ [ "This function takes two inputs, a and b, then applies several operations to return c. \nWe simply define the function, replace the instructions with the function, and input the new values of <code>a1</code>, <code>b1</code> and <code>a2</code>, <code>b2</code> as inputs. 
The entire process is demonstrated in the figure: ", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%203/Images/FuncsPros.gif\" width=\"850\" />", "_____no_output_____" ], [ "Code **Blocks 1** and **Block 2** can now be replaced with code **Block 3** and code **Block 4**.", "_____no_output_____" ], [ "<h4>Block 3:</h4>", "_____no_output_____" ] ], [ [ "a1 = 4\nb1 = 5\nc1 = Equation(a1, b1)\nc1", "_____no_output_____" ] ], [ [ "<h4>Block 4:</h4>", "_____no_output_____" ] ], [ [ "a2 = 0\nb2 = 0\nc2 = Equation(a2, b2)\nc2", "_____no_output_____" ] ], [ [ "<hr>", "_____no_output_____" ], [ "#### Pre-defined functions", "_____no_output_____" ], [ "There are many pre-defined functions in Python, so let's start with the simple ones.", "_____no_output_____" ], [ "The <code>print()</code> function:", "_____no_output_____" ] ], [ [ "# Build-in function print()\n\nalbum_ratings = [10.0, 8.5, 9.5, 7.0, 7.0, 9.5, 9.0, 9.5]\n\nprint(album_ratings)", "[10.0, 8.5, 9.5, 7.0, 7.0, 9.5, 9.0, 9.5]\n" ] ], [ [ "The <code>sum()</code> function adds all the elements in a list or tuple:", "_____no_output_____" ] ], [ [ "# Use sum() to add every element in a list or tuple together\n\nsum(album_ratings)", "_____no_output_____" ] ], [ [ "The <code>len()</code> function returns the length of a list or tuple: ", "_____no_output_____" ] ], [ [ "# Show the length of the list or tuple\n\nlen(album_ratings)", "_____no_output_____" ] ], [ [ "#### Using <code>if</code>/<code>else</code> Statements and Loops in Functions", "_____no_output_____" ], [ "The <code>return()</code> function is particularly useful if you have any IF statements in the function, when you want your output to be dependent on some condition: ", "_____no_output_____" ] ], [ [ "# Function example\n\ndef type_of_album(artist, album, year_released):\n \n print(artist, album, year_released)\n if year_released > 1980:\n return \"Modern\"\n else:\n return \"Oldie\"\n \nx = type_of_album(\"Michael Jackson\", \"Thriller\", 1980)\nprint(x)", "Michael Jackson Thriller 1980\nOldie\n" ], [ "y = type_of_album(\"Michael Jackson\", \"Thriller\", 1990)\nprint(y)", "Michael Jackson Thriller 1990\nModern\n" ] ], [ [ "We can use a loop in a function. For example, we can <code>print</code> out each element in a list:", "_____no_output_____" ] ], [ [ "# Print the list using for loop\n\ndef PrintList(the_list):\n for element in the_list:\n print(element[i])", "_____no_output_____" ], [ "# Implement the printlist function\n#PrintList(['1', 1, 'the man', \"abc\"])", "_____no_output_____" ] ], [ [ "<hr>", "_____no_output_____" ], [ "##### Setting default argument values in your custom functions", "_____no_output_____" ], [ "You can set a default value for arguments in your function. For example, in the <code>isGoodRating()</code> function, what if we wanted to create a threshold for what we consider to be a good rating? 
Perhaps by default, we should have a default rating of 4:", "_____no_output_____" ] ], [ [ "# Example for setting param with default value\n\ndef isGoodRating(rating=4): \n \n if(rating < 7):\n print(\"this album sucks it's rating is\",rating)\n \n else:\n print(\"this album is good its rating is\",rating)\n", "_____no_output_____" ], [ "# Test the value with default value and with input\n\nisGoodRating(4)\nisGoodRating(10)", "this album sucks it's rating is 4\nthis album is good its rating is 10\n" ] ], [ [ "<hr>", "_____no_output_____" ], [ "#### Global variables", "_____no_output_____" ], [ "So far, we've been creating variables within functions, but we have not discussed variables outside the function. These are called global variables. \n<br>\nLet's try to see what <code>printer1</code> returns:", "_____no_output_____" ] ], [ [ "# Example of global variable\n\nartist = \"Michael Jackson\"\n\ndef printer1(artist):\n internal_var = artist\n print(artist, \"is an artist\")\n \nprinter1(internal_var)", "Whitney Houston is an artist\n" ] ], [ [ "If we print <code>internal_var</code> we get an error. ", "_____no_output_____" ], [ "<b>We got a Name Error: <code>name 'internal_var' is not defined</code>. Why?</b> \n\nIt's because all the variables we create in the function is a <b>local variable</b>, meaning that the variable assignment does not persist outside the function. \n\nBut there is a way to create <b>global variables</b> from within a function as follows:", "_____no_output_____" ] ], [ [ "artist = \"Michael Jackson\"\n\ndef printer(artist):\n global internal_var \n internal_var= \"Whitney Houston\"\n print(artist,\"is an artist\")\n\nprinter(artist)\nprinter(internal_var)", "Michael Jackson is an artist\nWhitney Houston is an artist\n" ] ], [ [ "#### Scope of a Variable", "_____no_output_____" ], [ " The scope of a variable is the part of that program where that variable is accessible. Variables that are declared outside of all function definitions, such as the <code>myFavouriteBand</code> variable in the code shown here, are accessible from anywhere within the program. As a result, such variables are said to have global scope, and are known as global variables. \n <code>myFavouriteBand</code> is a global variable, so it is accessible from within the <code>getBandRating</code> function, and we can use it to determine a band's rating. We can also use it outside of the function, such as when we pass it to the print function to display it:", "_____no_output_____" ] ], [ [ "# Example of global variable\n\nmyFavouriteBand = \"AC/DC\"\n\ndef getBandRating(bandname):\n if bandname == myFavouriteBand:\n return 10.0\n else:\n return 0.0\n\nprint(\"AC/DC's rating is:\", getBandRating(\"AC/DC\"))\nprint(\"Deep Purple's rating is:\",getBandRating(\"Deep Purple\"))\nprint(\"My favourite band is:\", myFavouriteBand)", "AC/DC's rating is: 10.0\nDeep Purple's rating is: 0.0\nMy favourite band is: AC/DC\n" ] ], [ [ " Take a look at this modified version of our code. Now the <code>myFavouriteBand</code> variable is defined within the <code>getBandRating</code> function. A variable that is defined within a function is said to be a local variable of that function. That means that it is only accessible from within the function in which it is defined. Our `getBandRating` function will still work, because <code>myFavouriteBand</code> is still defined within the function. 
However, we can no longer print <code>myFavouriteBand</code> outside our function, because it is a local variable of our <code>getBandRating</code> function; it is only defined within the <code>getBandRating</code> function:", "_____no_output_____" ] ], [ [ "# Example of local variable\n\ndef getBandRating(bandname):\n myFavouriteBand = \"AC/DC\"\n if bandname == myFavouriteBand:\n return 10.0\n else:\n return 0.0\n\nprint(\"AC/DC's rating is: \", getBandRating(\"AC/DC\"))\nprint(\"Deep Purple's rating is: \", getBandRating(\"Deep Purple\"))\nprint(\"My favourite band is\", myFavouriteBand)", "AC/DC's rating is: 10.0\nDeep Purple's rating is: 0.0\nMy favourite band is AC/DC\n" ] ], [ [ " Finally, take a look at this example. We now have two <code>myFavouriteBand</code> variable definitions. The first one of these has a global scope, and the second of them is a local variable within the <code>getBandRating</code> function. Within the <code>getBandRating</code> function, the local variable takes precedence. **Deep Purple** will receive a rating of 10.0 when passed to the <code>getBandRating</code> function. However, outside of the <code>getBandRating</code> function, the <code>getBandRating</code> s local variable is not defined, so the <code>myFavouriteBand</code> variable we print is the global variable, which has a value of **AC/DC**:", "_____no_output_____" ] ], [ [ "# Example of global variable and local variable with the same name\n\nmyFavouriteBand = \"AC/DC\"\n\ndef getBandRating(bandname):\n myFavouriteBand = \"Deep Purple\"\n if bandname == myFavouriteBand:\n return 10.0\n else:\n return 0.0\n\nprint(\"AC/DC's rating is:\",getBandRating(\"AC/DC\"))\nprint(\"Deep Purple's rating is: \",getBandRating(\"Deep Purple\"))\nprint(\"My favourite band is:\", myFavouriteBand)", "AC/DC's rating is: 0.0\nDeep Purple's rating is: 10.0\nMy favourite band is: AC/DC\n" ] ], [ [ "<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n<a id='lambdas'></a>\n \n### Functions: String Formats, Concatenation, Lambdas, Iterators, Generators\n</div>", "_____no_output_____" ], [ "#### String Format function", "_____no_output_____" ] ], [ [ "name = \"okocha\"\nage = 20 \njersey = 20", "_____no_output_____" ], [ "#using the f string format will help you format the print statement\nprint(f\"{name} is {age} years old. His jersey number is {jersey}\")", "okocha is 20 years old. His jersey number is 20\n" ], [ "#the above code can be replaicated as this:\n\nprint(\"{} is {} is 20 years old. His jersey number is {}\".format(name, age, jersey))", "okocha is 20 is 20 years old. 
His jersey number is 20\n" ], [ "print(f\"I like curly brackets {{}}\")", "I like curly brackets {}\n" ], [ "print(f\"Neuer is the 'best' goalkeeper in the \\n world\")", "Neuer is the 'best' goalkeeper in the \n world\n" ] ], [ [ "#### Concatenation", "_____no_output_____" ], [ "#### Lambda ", "_____no_output_____" ], [ "#### Iterators", "_____no_output_____" ], [ "#### Generators", "_____no_output_____" ], [ "#### Time it Magic", "_____no_output_____" ] ], [ [ "%%timeit\nsum([1, 2, 3])", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n<a id='errors'></a>\n \n### Errors: Try and Except Handling, Error blocking and Error Tracing\n</div>", "_____no_output_____" ], [ "This is how to simply handle error and move your program cells.", "_____no_output_____" ] ], [ [ "my_tuple = (1, 2, 3)", "_____no_output_____" ], [ "my_tuple[0] = -1", "_____no_output_____" ], [ "try:\n my_tuple[0] = -1\nexcept TypeError:\n print(\"This can't be done\")\n \nprint(\"program will not be stopped\")", "This can't be done\nprogram will not be stopped\n" ], [ "my_list3 = [1, 3, 5, 7]\n\ntry:\n print(my_list3[4])\nexcept IndexError:\n print(\"out of range selection\")", "out of range selection\n" ], [ "a = 1\nb = 0\n\ntry:\n print(c = a / b)\nexcept ZeroDivisionError:\n print(\"stopped\")", "stopped\n" ], [ "def division(a, b):\n try:\n return a/b\n except Exception:\n return -1", "_____no_output_____" ] ], [ [ "it is good practice to state the particular error type to get", "_____no_output_____" ], [ "*Copyright &copy; 2020 The Data Incubator. All rights reserved.*\n**Edited by Chisiomloius**", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n<a id='assignment-link'></a>\n\n### Assignment Link\n\n</div>", "_____no_output_____" ], [ "Now we would try out some practical examples with what we have learnt so far ! Let us try out this [notebook](https://typei.com)", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n<a id='after-thoughts'></a>\n \n### After Thoughts ??\n</div>", "_____no_output_____" ], [ "What do you think so far ?", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n<a id='about'></a>\n \n### About this Instructor:\n</div>\n<p><a href=\"https://github.com/chisomloius/\" target= \"_blank\"> ChisomLoius</a> is very passionate about Data Analysis and Machine Learning and does lot of free lance teaching and learning. Holding a B.Eng. in Petroleum Engineering, my focused is leveraging the knowledge of Data Science and Machine Learning to help build solutions in Education and High Tech Security. I currently work as a Petrochemist.\n</p>", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n<a id='more-info'></a>\n \n### More Info\n</div>", "_____no_output_____" ], [ "\n<p> Visit our <a href=\"https://techorigin.alisutechnology.com\" target= \"_blank\">website</a>, or further enquire more information via our <a href=\"[email protected]\" target= \"_blank\">email</a>. \n<hr>\n<p>Copyright &copy; 2021 TechOrigin. This notebook and its source code are released under the terms of the <a href=\"https://cognitiveclass.ai/mit-license/\">MIT License</a>.</p>", "_____no_output_____" ] ] ]
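The Concatenation, Lambda, Iterators and Generators headings above were left without examples, so the following is a small supplementary sketch of each idea. The names <code>add_one</code>, <code>ratings_iter</code> and <code>squares</code> are invented for illustration and are not part of the original notebook:

```python
# Concatenation: joining strings with + or with str.join()
full_name = "Michael" + " " + "Jackson"
joined = " ".join(["Michael", "Jackson"])

# Lambda: an anonymous single-expression function
add_one = lambda a: a + 1
print(add_one(5))          # 6

# Iterator: an object that hands back one item per call to next()
ratings_iter = iter([10.0, 8.5, 9.5])
print(next(ratings_iter))  # 10.0

# Generator: a function that yields its values lazily instead of building a list
def squares(n):
    for i in range(n):
        yield i * i

print(list(squares(5)))    # [0, 1, 4, 9, 16]
```

Each snippet can be pasted into a code cell as-is.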
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
c52f625a711f8de43876d3d68666efe796b6bd24
62828
ipynb
Jupyter Notebook
src/inDelphi_Check_Data_Leakage.ipynb
gifford-lab/skipguide-analysis
e1e2208bbdc89810db430e8b960c188b9712ee57
[ "MIT" ]
null
null
null
src/inDelphi_Check_Data_Leakage.ipynb
gifford-lab/skipguide-analysis
e1e2208bbdc89810db430e8b960c188b9712ee57
[ "MIT" ]
null
null
null
src/inDelphi_Check_Data_Leakage.ipynb
gifford-lab/skipguide-analysis
e1e2208bbdc89810db430e8b960c188b9712ee57
[ "MIT" ]
null
null
null
65.241952
25480
0.618625
[ [ [ "from config import *\nfrom utils import *\n\nimport os\nimport sys\nimport copy\nimport numpy as np\nimport collections\nimport multiprocessing\nimport pickle\n\nimport numpy as np\nimport scipy\n\n# Suppress pandas future warning, which messes tqdm\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nimport pandas as pd\n\nfrom tqdm.notebook import tqdm\n\n%matplotlib inline\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom Bio import pairwise2", "_____no_output_____" ] ], [ [ "# Check for inDelphi training data leakage\ninDelphi (Shen et al. 2018) is trained on a dataset of 55-bp sequences (available from their [GitHub](https://github.com/maxwshen/indelphi-dataprocessinganalysis/blob/master/SupplementaryData.xlsx)), referred to as \"lib-A\" in its paper. We are evaluating the performance of inDelphi on our lib-SA library of 61-bp sequences (specifically, the dat-A subset). For the evaluation to be meaningful, we need to make sure inDelphi lib-A sequences do not overlap and are not homologous to sequences from our dat-A.", "_____no_output_____" ], [ "## inDelphi's Lib-A", "_____no_output_____" ] ], [ [ "libA_df = pd.read_excel(os.path.join(DATA_DIR, 'indelphiLibA', 'SupplementaryData.xlsx'), header=1, sheet_name='Supplementary Table 2')", "_____no_output_____" ], [ "libA_df.head()", "_____no_output_____" ], [ "libA_seqs = libA_df['Sequence Context'].unique().tolist()", "_____no_output_____" ], [ "len(libA_seqs)", "_____no_output_____" ] ], [ [ "## Our dat-A Target Sequences", "_____no_output_____" ] ], [ [ "exp_design.head() # lib-SA", "_____no_output_____" ], [ "datA_df = pd.read_csv(os.path.join(TABLES_DIR, 'datA_table.csv.gz'), compression='gzip')\ndatA_df", "_____no_output_____" ], [ "datA_seqs = exp_design.loc[datA_df['gRNA ID'].unique()]['Designed 61-bp target site (37i-24e, AG)'].unique().tolist()", "_____no_output_____" ], [ "len(datA_seqs)", "_____no_output_____" ] ], [ [ "## Sequence Identity Analysis\nFor each target sequence in our dat-A, align it with every sequence in inDelphi's lib-A to determine the most similar sequence, and record the sequence identity. Plot distribution of such max sequence identities. If lib-SA sequences are dissimilar to inDelphi's lib-A sequences, then the distribution should be skewed towards lower max sequence identities.\n\nLocal alignment (Smith Waterman) parameters: +1 match, -3 mismatch, -5 gap open, -2 gap extend. 
These are the same as the default parameters of BLAST's blastn-short program.\n\nSequence identity is the definition used by BLAST: (# match positions in alignment(seq1, seq2))/(min(len(seq1), len(seq2))", "_____no_output_____" ] ], [ [ "def sequence_identity(seq1, seq2, alignment):\n num_matches = pairwise2.format_alignment(*alignment).split('\\n')[1].count('|')\n return num_matches / min(len(seq1), len(seq2))", "_____no_output_____" ], [ "def max_seq_identity_libA(our_seq):\n max_seq_identity = -1\n for inDelphi_seq in libA_seqs:\n # Using BLAST suite's blastn-short defaults:\n # +1 match\n # -3 mismatch\n # -5 gap open\n # -2 gap extend\n alignment = pairwise2.align.localms(inDelphi_seq, our_seq, 1, -3, -5, -2)\n identity = sequence_identity(inDelphi_seq, our_seq, alignment[0])\n max_seq_identity = max(max_seq_identity, identity)\n return max_seq_identity\n\ndef compute_max_sequence_identities():\n max_sequence_identities = []\n \n try:\n p = multiprocessing.Pool(NUM_PROCESSES)\n for max_seq_identity in tqdm(p.imap_unordered(max_seq_identity_libA, datA_seqs, chunksize=2), total=len(datA_seqs)):\n max_sequence_identities.append(max_seq_identity)\n finally:\n p.close()\n p.join()\n \n return max_sequence_identities", "_____no_output_____" ], [ "if not pickle_exists(DAT_A_INDELPHI_SEQUENCE_IDENTITY):\n max_sequence_identities = compute_max_sequence_identities()\n save_var(max_sequence_identities, DAT_A_INDELPHI_SEQUENCE_IDENTITY)\nelse:\n max_sequence_identities = load_var(DAT_A_INDELPHI_SEQUENCE_IDENTITY)", "_____no_output_____" ] ], [ [ "## S2 FigA", "_____no_output_____" ] ], [ [ "def plot_max_sequence_identities(max_sequence_identities):\n plt.rcParams.update({'font.size': 12})\n fig, ax = plt.subplots(figsize=(5,5))\n sns.distplot(max_sequence_identities, kde=False, ax = ax)\n ax.set(xlabel=\"Sequence Identity\",\n ylabel='# of dat-A Target Sequences (' + str(len(datA_seqs)) + ' Total)',\n title=\"Distribution of pairwise best aligned\\nsequence identity\\nbetween dat-A & inDelphi's Lib-A\")\n median = np.median(max_sequence_identities)\n plt.axvline(median, color='gray', linestyle='dotted')\n plt.text(median + 0.01, 450, 'Median = ' + \"{:.2f}\".format(median))\n plt.savefig(os.path.join(IMAGES_DIR, 'datA_indelphi_sequence_identity.png'), dpi=300, bbox_inches='tight')\n plt.show()\n print(\"Median sequence identity:\", np.median(max_sequence_identities))\n print(\"Mean sequence identity:\", np.mean(max_sequence_identities))\n print(\"Min sequence identity:\", np.min(max_sequence_identities))\n print(\"Max sequence identity:\", np.max(max_sequence_identities))\n print(\"Second largest sequence identity:\", np.sort(max_sequence_identities)[::-1][1])", "_____no_output_____" ], [ "plot_max_sequence_identities(max_sequence_identities)", "_____no_output_____" ] ] ]
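To make the sequence-identity definition above concrete, here is a minimal self-contained sketch on a made-up pair of 10-bp sequences (the sequences are invented; the scoring parameters are the blastn-short defaults the notebook already uses):

```python
from Bio import pairwise2

# Invented toy sequences, for illustration only
seq1 = "ACGTACGTAA"
seq2 = "ACGTTCGTAA"

# Local alignment with +1 match, -3 mismatch, -5 gap open, -2 gap extend
alignments = pairwise2.align.localms(seq1, seq2, 1, -3, -5, -2)
best = alignments[0]

# Sequence identity = matched positions / length of the shorter sequence,
# counting the '|' characters in the formatted alignment, as in the notebook
num_matches = pairwise2.format_alignment(*best).split('\n')[1].count('|')
identity = num_matches / min(len(seq1), len(seq2))
print(identity)
```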
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
c52f7a0ae0a66a06d786793911952e1e3862e314
30235
ipynb
Jupyter Notebook
VLDB2022_demos/single_round_demo.ipynb
valterUo/Quantum-Computing-based-Optimization-for-Sustainable-Data-Workflows-in-Cloud
7de12e9f67bc81c2a56434fe58c6a8f96c504f3b
[ "MIT" ]
null
null
null
VLDB2022_demos/single_round_demo.ipynb
valterUo/Quantum-Computing-based-Optimization-for-Sustainable-Data-Workflows-in-Cloud
7de12e9f67bc81c2a56434fe58c6a8f96c504f3b
[ "MIT" ]
null
null
null
VLDB2022_demos/single_round_demo.ipynb
valterUo/Quantum-Computing-based-Optimization-for-Sustainable-Data-Workflows-in-Cloud
7de12e9f67bc81c2a56434fe58c6a8f96c504f3b
[ "MIT" ]
1
2022-02-24T20:09:21.000Z
2022-02-24T20:09:21.000Z
32.475832
475
0.588854
[ [ [ "# Practical Quantum Computing Approach for Sustainable Workflow Optimization in Cloud Infrastructures", "_____no_output_____" ], [ "by [Valter Uotila](https://researchportal.helsinki.fi/en/persons/valter-johan-edvard-uotila), PhD student, [Unified Database Management Systems](https://www2.helsinki.fi/en/researchgroups/unified-database-management-systems-udbms/news), University of Helsinki", "_____no_output_____" ], [ "This is just a specified shortest path finding application applied to the problem presented in the [document](https://github.com/valterUo/Quantum-Computing-based-Optimization-for-Sustainable-Data-Workflows-in-Cloud/blob/main/Quantum_Computing__based_Optimization_for_Sustainable_Data_Workflows_in_Cloud.pdf) that comes along with this implementation.", "_____no_output_____" ], [ "Possible quantum software-harware combinations to solve the problem:\n\n1. Amazon Braket: Ocean implementation of this code\n2. D-wave's Leap Advantage: Ocean implementation of this code\n3. IBM Quantum systems\n 1. Simulator in cloud\n 2. NISQ device in cloud\n4. Local machine\n 1. Ocean's imulated annealing\n 2. Qiskit's local qasm simulator", "_____no_output_____" ], [ "## Part 1: Implementation with Ocean connecting to Amazon Braket and D-wave Leap quantum annealers", "_____no_output_____" ] ], [ [ "# Install a pip package in the current Jupyter kernel\n#import sys\n#!{sys.executable} -m pip install numpy\n#!{sys.executable} -m pip install ocean_plugin", "_____no_output_____" ], [ "import dimod\nfrom dimod.generators.constraints import combinations\nfrom dwave.system import LeapHybridSampler, DWaveSampler\nfrom hybrid.reference import KerberosSampler\nfrom dwave.system.composites import EmbeddingComposite\n\nfrom braket.aws import AwsDevice\nfrom braket.ocean_plugin import BraketSampler, BraketDWaveSampler\n\nimport numpy as np\nimport json\nimport itertools\nimport os\nimport math\nimport random\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\nnotebook_path = os.path.abspath(\"main.ipynb\")", "_____no_output_____" ], [ "def append_linear_safe(variable, value, linear_dict):\n if variable in linear_dict.keys():\n linear_dict[variable] = linear_dict[variable] + value\n else:\n linear_dict[variable] = value\n\ndef append_quadratic_safe(variable, value, quadratic_dict):\n if variable in quadratic_dict.keys():\n quadratic_dict[variable] = quadratic_dict[variable] + value\n else:\n quadratic_dict[variable] = value", "_____no_output_____" ] ], [ [ "## Importing data", "_____no_output_____" ], [ "This demonstration implements three different sized data sets. 
Comment and uncomment the data sets you want to use.", "_____no_output_____" ] ], [ [ "# Demonstration 1\n\n#cloud_partners_data = \"cloud_partners_small.json\"\n#workload_data = \"workload_small.json\"\n#strength = 1500.0\n#num_reads = 10\n#annealing_time = 1.0 # This is the minimal possible annealing time\n\n# Demonstration 2\n\n#cloud_partners_data = \"cloud_partners_medium.json\"\n#workload_data = \"workload_medium.json\"\n#strength = 90.0\n#num_reads = 900\n#annealing_time = 20.0\n\n# Demonstration 3\n\ncloud_partners_data = \"cloud_partners_large.json\"\nworkload_data = \"workload_large.json\"\nstrength = 100.0", "_____no_output_____" ], [ "cloud_partners_file_path = os.path.join(os.path.dirname(notebook_path), \"data/single_round_data/cloud_partners/\" + cloud_partners_data)\nf = open(cloud_partners_file_path)\npartners_root = json.load(f)\ncloud_partners = partners_root[\"cloud_partners\"]\n\nworkload_file_path = os.path.join(os.path.dirname(notebook_path), \"data/single_round_data/workloads/\" + workload_data)\nf = open(workload_file_path)\nworkload_root = json.load(f)\nworkload = workload_root[\"workload\"]\n\n#print(\"Cloud partners: \", json.dumps(cloud_partners, indent=1))\n#print(\"Workloads: \", json.dumps(workload, indent=1))", "_____no_output_____" ] ], [ [ "## Emission simulator", "_____no_output_____" ], [ "This section implements an emission simulator which simulates emission changes in data center operations. Note that it is relatively hard to get accurate data from individual data centers. This simulator is just for demonstration and it does not have an actual scientific background.", "_____no_output_____" ] ], [ [ "def emission_simulator(variable1, variable2, cloud_partners, workload):\n simulated_carbon_footprint = 1\n emission_factor = 1\n workload_type_in_process = None\n \n source_data_center_id = variable1[1]\n work_in_process = variable2[0]\n target_data_center_id = variable2[1]\n \n for work in workload:\n if work[\"work_id\"] == int(work_in_process):\n emission_factor = work[\"emission_factor\"]\n workload_type_in_process = work[\"work_type\"]\n \n for partner in cloud_partners:\n for center in partner[\"data_centers\"]:\n # Find correct target center\n if target_data_center_id == center[\"center_id\"]:\n for workload_type in center[\"workload_dependent_emissions\"]:\n # Find correct workload type i.e. Big Data, IoT, ML, etc.\n if workload_type_in_process == workload_type[\"workload_type\"]:\n center_emission_factor = workload_type[\"center_emission_factor\"]\n #print(center_emission_factor)\n simulated_carbon_footprint = emission_factor*center_emission_factor\n \n return simulated_carbon_footprint", "_____no_output_____" ] ], [ [ "## Creating variables for the binary quadratic model", "_____no_output_____" ], [ "In the demo paper we defined variables to be $ x_{i,j} = (w_i, d_j) $.", "_____no_output_____" ] ], [ [ "#%%timeit\nvariables = dict()\nworkload_order = []\n\nfor work in workload:\n variables[str(work[\"work_id\"])] = list()\n workload_order.append(str(work[\"work_id\"]))\n for partner in cloud_partners:\n for center in partner[\"data_centers\"]:\n # The each key in the variables dictionary corresponds to a level in a tree i.e. 
a time step in the workflow\n variables[str(work[\"work_id\"])].append((str(work[\"work_id\"]), center[\"center_id\"]))\n \n#print(json.dumps(variables, indent=1))", "_____no_output_____" ] ], [ [ "## Constructing constraints ", "_____no_output_____" ], [ "### Constraint 1", "_____no_output_____" ], [ "This constraint implements the requirement that for every work $ w_i $ we have exactly one variable $ x_{i,j} = (w_i, d_j) = 1$. In other words, this means that every work is executed exactly on a single data center.", "_____no_output_____" ] ], [ [ "def construct_bqm_constraint1(bqm, variables, strength):\n for work_id in variables:\n one_work_bqm = combinations(variables[work_id], 1, strength=strength)\n bqm.update(one_work_bqm)\n return bqm", "_____no_output_____" ] ], [ [ "### Constraint 2", "_____no_output_____" ], [ "This constraint implements the requirement that for every pair of variables $x_{i,j} = (w_i, d_j)$ and $x_{i+1,k} = (w_{i+1}, d_k)$ we associate the estimated emission coefficient $e(x_{i,j}, x_{i+1,k})$. This coefficient is calculated in emission_simulator function. Note that we need to calculate this only for those pairs, where the works $w_i$ and $w_{i+1}$ are consecutive works in the workload.", "_____no_output_____" ], [ "To evaluate the algorithm we store the tree in a networkx graph.", "_____no_output_____" ] ], [ [ "def construct_bqm_constraint2(bqm, variables, workload_order):\n vartype = dimod.BINARY\n A = 1\n linear = dict()\n quadratic = dict()\n offset = 0.0\n tree = nx.Graph()\n\n for work_id_current in range(len(workload_order) - 1):\n work_id_next = work_id_current + 1\n key_current = workload_order[work_id_current]\n key_next = workload_order[work_id_next]\n\n for work1 in variables[key_current]:\n for work2 in variables[key_next]:\n \n coeff = emission_simulator(work1, work2, cloud_partners, workload)\n \n append_quadratic_safe((work1, work2), coeff, quadratic)\n tree.add_edge(work1, work2, weight=coeff)\n\n #print(\"Works\", work1, work2)\n #print(\"Coefficient\", coeff)\n\n bqm_c2 = dimod.BinaryQuadraticModel(linear, quadratic, offset, vartype)\n bqm_c2.scale(A)\n bqm.update(bqm_c2)\n return bqm, tree", "_____no_output_____" ] ], [ [ "## Demonstrating algorithm", "_____no_output_____" ] ], [ [ "def compare_to_optimal(solution, tree, optimal_weight):\n current_total = 0\n try:\n for i in range(len(solution) - 1):\n edge_weight = tree.get_edge_data(solution[i], solution[i+1])\n current_total += edge_weight[\"weight\"]\n except:\n print(\"The quantum result contains edges which are not in the tree.\")\n return np.abs(optimal_weight - current_total)/optimal_weight", "_____no_output_____" ], [ "def print_solution(sample, tree, optimal_weight = -1):\n positive_solution = []\n for varname, value in sample.items():\n if value == 1:\n positive_solution.append(varname)\n print(varname, value)\n positive_solution = sorted(positive_solution, key=lambda x: int(x[0]))\n if optimal_weight != -1:\n print(\"Difference from the optimal \", compare_to_optimal(positive_solution, tree, optimal_weight))", "_____no_output_____" ] ], [ [ "### Wrapping up various methods to solve the QUBO", "_____no_output_____" ] ], [ [ "def solve_bqm_in_leap(bqm, sampler = \"DWaveSampler\"):\n bqm.normalize()\n if sampler == \"DWaveSampler\":\n \n num_reads = 900\n annealing_time = 20.0\n sampler = DWaveSampler()\n sampler = EmbeddingComposite(sampler)\n sampleset = sampler.sample(bqm, num_reads=num_reads, annealing_time = annealing_time, label = 'Data workflow optimization with 
DWaveSampler')\n \n elif sampler == \"Kerberos\":\n \n kerberos_sampler = KerberosSampler()\n sampleset = kerberos_sampler.sample(bqm, max_iter=10, convergence=3, qpu_params={'label': 'Data workflow optimization with Kerberos'})\n \n elif sampler == \"LeapHybrid\":\n \n sampler = LeapHybridSampler()\n sampleset = sampler.sample(bqm)\n \n print(json.dumps(sampleset.info, indent=1))\n sample = sampleset.first.sample\n return sample\n \n #print(sampleset)\n #print(best_solution)\n #sample = best_solution\n #energy = sampleset.first.energy", "_____no_output_____" ], [ "def solve_bqm_in_amazon_braket(bqm, system = \"Advantage\"):\n device = None\n num_reads = 900\n annealing_time = 20.0\n if system == \"Advantage\":\n device = \"arn:aws:braket:::device/qpu/d-wave/Advantage_system4\"\n elif system == \"2000Q\":\n device = \"arn:aws:braket:::device/qpu/d-wave/DW_2000Q_6\"\n sampler = BraketDWaveSampler(device_arn = device)\n sampler = EmbeddingComposite(sampler)\n sampleset = sampler.sample(bqm, num_reads=num_reads, annealing_time = annealing_time)\n sample = sampleset.first.sample\n \n # print timing info for the previous D-Wave job\n print(json.dumps(sampleset.info['additionalMetadata']['dwaveMetadata']['timing'], indent=1))\n \n return sample", "_____no_output_____" ], [ "def solve_with_simulated_annealing(bqm):\n num_reads = 200\n sampler = dimod.SimulatedAnnealingSampler()\n sampleset = sampler.sample(bqm, num_reads=num_reads)\n sample = sampleset.first.sample\n return sample", "_____no_output_____" ], [ "def solve_exactly(bqm):\n sampler = dimod.ExactSolver()\n sampleset = sampler.sample(bqm)\n sample = sampleset.first.sample\n return sample", "_____no_output_____" ], [ "def solve_with_networkx(tree, variables, start_work):\n possible_solutions = []\n best_solution = None\n min_weight = float('Inf')\n for source_var in variables[start_work]:\n for target_var in variables[str(len(variables) - 1)]:\n possible_solutions.append(nx.dijkstra_path(tree, source=source_var, target=target_var))\n for sol in possible_solutions:\n current_total = 0\n for i in range(len(sol) - 1):\n edge_weight = tree.get_edge_data(sol[i], sol[i+1])\n current_total += edge_weight[\"weight\"]\n #print(\"Shortest path \", sol)\n #print(\"Current total \", current_total)\n if min_weight > current_total:\n min_weight = current_total\n best_solution = sol\n return best_solution, min_weight", "_____no_output_____" ] ], [ [ "## Run single time step", "_____no_output_____" ] ], [ [ "vartype = dimod.BINARY\nbqm = dimod.BinaryQuadraticModel({}, {}, 0.0, vartype)", "_____no_output_____" ] ], [ [ "Timing the construction of the model", "_____no_output_____" ] ], [ [ "#%timeit construct_bqm_constraint1(bqm, variables, strength)\n#%timeit construct_bqm_constraint2(bqm, variables, workload_order)", "_____no_output_____" ] ], [ [ "Constructing the model", "_____no_output_____" ] ], [ [ "bqm = construct_bqm_constraint1(bqm, variables, strength)\nbqm, tree = construct_bqm_constraint2(bqm, variables, workload_order)\n#print(bqm)\n\n#print(\"The problem is to find the minimum path from some of the nodes ('0', x) to some of the nodes ('5', y). 
The weight of the edges are defined by carbon footprint associated to the computation.\")\n#nx.draw(tree, with_labels = True)", "_____no_output_____" ] ], [ [ "#### Optimal and correct solution for evaluation", "_____no_output_____" ], [ "Timing the classical solution", "_____no_output_____" ] ], [ [ "#%timeit solve_with_networkx(tree, variables, '0')", "_____no_output_____" ] ], [ [ "Solving the problem classically", "_____no_output_____" ] ], [ [ "print(\"Size of the problem\")\nprint(\"Number of nodes: \", tree.number_of_nodes())\nprint(\"Number of edges: \", tree.number_of_nodes())\n\nbest_solution, optimal_weight = solve_with_networkx(tree, variables, '0')\nprint(\"Best solution: \", best_solution)\nprint(\"Optimal weight: \", optimal_weight)", "Size of the problem\nNumber of nodes: 400\nNumber of edges: 400\nBest solution: [('0', '00'), ('1', '53'), ('2', '50'), ('3', '53'), ('4', '10'), ('5', '50'), ('6', '50'), ('7', '42'), ('8', '21'), ('9', '53'), ('10', '50'), ('11', '53'), ('12', '10'), ('13', '50'), ('14', '50'), ('15', '42')]\nOptimal weight: 114\n" ] ], [ [ "The following results we obtain with annealing. Ideally we would be close to the results we obtain from the function solve_with_networkx.", "_____no_output_____" ] ], [ [ "#print(\"Solution with Amazon Braket using Advantage\")\n#solution = solve_bqm_in_amazon_braket(bqm)\n#print_solution(solution, tree, optimal_weight)\n\n#print(\"Solution with Amazon Braket using 2000Q\")\n#solution = solve_bqm_in_amazon_braket(bqm, \"2000Q\")\n#print_solution(solution, tree, optimal_weight)\n\n#print(\"Solution with D-wave Leap with DWaveSampler\")\n#solution = solve_bqm_in_leap(bqm, \"DWaveSampler\")\n#print_solution(solution, tree, optimal_weight)\n\n#print(\"Solution with D-wave Leap with LeapHybridSampler\")\n#solution = solve_bqm_in_leap(bqm, \"LeapHybrid\")\n#print_solution(solution, tree, optimal_weight) Kerberos\n\n#print(\"Solution with D-wave Leap with KerberosSampler\")\n#solution = solve_bqm_in_leap(bqm, \"Kerberos\")\n#print_solution(solution, tree, optimal_weight) \n\nprint(\"Solution with simulated annealing\")\n%timeit solve_with_simulated_annealing(bqm)\nsolution = solve_with_simulated_annealing(bqm)\nprint_solution(solution, tree, optimal_weight)\n\n#print(\"Exact solution (takes time)\")\n#solve_exactly()", "Solution with simulated annealing\n" ] ], [ [ "## Part 2: Transfering problem to Qiskit", "_____no_output_____" ], [ "In this part of the code I rely on the [Qiskit Tutorials](https://qiskit.org/documentation/optimization/tutorials/index.html). I want to learn to understand the connection between Ocean implementation and Qiskit. The formulation in Qiskit enables solving the problem using IBM Quantum systems. 
Although Amazon Braket does not implement the following kind of approach, it might be possible to translate the Qiskit into the equivalent Pennylane code and run it in Braket.", "_____no_output_____" ], [ "### Importing Qiskit and IBM Quantum Systems", "_____no_output_____" ] ], [ [ "from qiskit import IBMQ, BasicAer\nfrom qiskit.providers.basicaer import QasmSimulatorPy\nfrom qiskit.utils import algorithm_globals, QuantumInstance\nfrom qiskit.algorithms import QAOA, NumPyMinimumEigensolver\nfrom qiskit_optimization.algorithms import (\n MinimumEigenOptimizer,\n RecursiveMinimumEigenOptimizer,\n SolutionSample,\n OptimizationResultStatus,\n)\nfrom qiskit_optimization import QuadraticProgram\n\nprovider = IBMQ.load_account()", "_____no_output_____" ] ], [ [ "### Transforming QUBO in Ocean to QUBO in Qiskit ", "_____no_output_____" ], [ "Function for evaluating Qiskit result:", "_____no_output_____" ] ], [ [ "def evaluate_qiskit_solution(result, tree, optimal):\n #print(result.variables_dict)\n path = []\n for key in result.variables_dict:\n if result.variables_dict[key] == 1.0:\n path.append(eval(key))\n print(\"Difference (in [0,1]) between the optimal solution and the solution found with Qiskit:\")\n print(compare_to_optimal(path, tree, optimal))", "_____no_output_____" ] ], [ [ "Transforming the QUBO in Qiskit. We use QAOA module in order to understand the details of the process better.", "_____no_output_____" ] ], [ [ "qubo = QuadraticProgram()\nqubo_variables = []\nfor var in bqm.variables:\n qubo.binary_var(str(var))\n qubo_variables.append(str(var))\n\nconstant = bqm.offset\nlinear = []\nquadratic = {}\n\nfor var in bqm.variables:\n linear.append(bqm.linear[var])\n \nfor key in bqm.quadratic:\n quadratic[(str(key[0]), str(key[1]))] = bqm.quadratic[key]\n\n#print(\"Variables: \", qubo_variables)\n#print(\"Offset \", constant)\n#print(\"Linear \", linear)\n#print(\"Quadratic \", quadratic)\n\nqubo.minimize(constant = constant, linear=linear, quadratic=quadratic)", "_____no_output_____" ], [ "# Local qasm simulator\nbackend = BasicAer.get_backend(\"qasm_simulator\")\n\n# ibmq_quito real universal QPU\n#backend = provider.get_backend('ibmq_quito')\n\n# IBM QASM simulator in cloud\n#backend = provider.get_backend('ibmq_qasm_simulator')\n\nalgorithm_globals.random_seed = 10598\nquantum_instance = QuantumInstance(\n backend = backend,\n seed_simulator=algorithm_globals.random_seed,\n seed_transpiler=algorithm_globals.random_seed,\n)\nqaoa_mes = QAOA(quantum_instance=quantum_instance)\nexact_mes = NumPyMinimumEigensolver()", "_____no_output_____" ], [ "qaoa = MinimumEigenOptimizer(qaoa_mes) # using QAOA\nexact = MinimumEigenOptimizer(exact_mes) # using the exact classical numpy minimum eigen solver\n\nqaoa_result = qaoa.solve(qubo)\nprint(qaoa_result)\nprint()\nevaluate_qiskit_solution(qaoa_result, tree, optimal_weight)", "_____no_output_____" ], [ "if type(backend) == QasmSimulatorPy:\n %timeit qaoa.solve(qubo)", "_____no_output_____" ], [ "#rqaoa = RecursiveMinimumEigenOptimizer(qaoa, min_num_vars=1, min_num_vars_optimizer=exact)\n#rqaoa_result = rqaoa.solve(qubo)\n#print(rqaoa_result)\n#print()\n#evaluate_qiskit_solution(rqaoa_result, tree, optimal_weight)", "_____no_output_____" ] ] ]
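As a small isolated illustration of how constraint 1 works, the sketch below (with invented labels <code>x_0_A</code>, <code>x_0_B</code>, <code>x_0_C</code> standing for one work on three candidate data centres) builds the same kind of one-hot penalty with the <code>combinations</code> generator used above and inspects it with the exact solver; it is not part of the original workflow code:

```python
import dimod
from dimod.generators.constraints import combinations

# Invented labels: one work, three candidate data centres
one_hot = combinations(['x_0_A', 'x_0_B', 'x_0_C'], 1, strength=100.0)

# The lowest-energy samples are exactly the assignments with a single variable set to 1
sampleset = dimod.ExactSolver().sample(one_hot)
print(sampleset.first.sample)
print(sampleset.first.energy)
```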
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
c52f96b3b8eacc1a8f7ae7225ea515a4a2ca6eb0
141881
ipynb
Jupyter Notebook
Machine-Learning-in-90-days-master/Section 1- Python Crash Course/.ipynb_checkpoints/4.5-Matplotlib-checkpoint.ipynb
ashwiniagandhi/Machine-Learning-in-90-days-master
e0fee3cdd205d9c8efc26634b4af47bc22cccaae
[ "BSD-2-Clause" ]
null
null
null
Machine-Learning-in-90-days-master/Section 1- Python Crash Course/.ipynb_checkpoints/4.5-Matplotlib-checkpoint.ipynb
ashwiniagandhi/Machine-Learning-in-90-days-master
e0fee3cdd205d9c8efc26634b4af47bc22cccaae
[ "BSD-2-Clause" ]
null
null
null
Machine-Learning-in-90-days-master/Section 1- Python Crash Course/.ipynb_checkpoints/4.5-Matplotlib-checkpoint.ipynb
ashwiniagandhi/Machine-Learning-in-90-days-master
e0fee3cdd205d9c8efc26634b4af47bc22cccaae
[ "BSD-2-Clause" ]
null
null
null
253.358929
23108
0.915662
[ [ [ "## MatplotLib Tutorial\n\nMatplotlib is a plotting library for the Python programming language and its numerical mathematics extension NumPy. It provides an object-oriented API for embedding plots into applications using general-purpose GUI toolkits like Tkinter, wxPython, Qt, or GTK+.\n\nSome of the major Pros of Matplotlib are:\n\n* Generally easy to get started for simple plots\n* Support for custom labels and texts\n* Great control of every element in a figure\n* High-quality output in many formats\n* Very customizable in general", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\n%matplotlib inline", "_____no_output_____" ], [ "import numpy as np", "_____no_output_____" ], [ "## Simple Examples\n\nx=np.arange(0,10)\ny=np.arange(11,21)\n", "_____no_output_____" ], [ "a=np.arange(40,50)\nb=np.arange(50,60)\n", "_____no_output_____" ], [ "##plotting using matplotlib \n\n##plt scatter\n\nplt.scatter(x,y,c='g')\nplt.xlabel('X axis')\nplt.ylabel('Y axis')\nplt.title('Graph in 2D')\nplt.savefig('Test.png')\n\n", "_____no_output_____" ], [ "y=x*x", "_____no_output_____" ], [ "## plt plot\n\nplt.plot(x,y,'r*',linestyle='dashed',linewidth=2, markersize=12)\nplt.xlabel('X axis')\nplt.ylabel('Y axis')\nplt.title('2d Diagram')", "_____no_output_____" ], [ "## Creating Subplots\n\nplt.subplot(2,2,1)\nplt.plot(x,y,'r--')\nplt.subplot(2,2,2)\nplt.plot(x,y,'g*--')\nplt.subplot(2,2,3)\nplt.plot(x,y,'bo')\nplt.subplot(2,2,4)\nplt.plot(x,y,'go')\n", "_____no_output_____" ], [ "x = np.arange(1,11) \ny = 3 * x + 5 \nplt.title(\"Matplotlib demo\") \nplt.xlabel(\"x axis caption\") \nplt.ylabel(\"y axis caption\") \nplt.plot(x,y) \nplt.show()", "_____no_output_____" ], [ "np.pi", "_____no_output_____" ], [ "# Compute the x and y coordinates for points on a sine curve \nx = np.arange(0, 4 * np.pi, 0.1) \ny = np.sin(x) \nplt.title(\"sine wave form\") \n\n# Plot the points using matplotlib \nplt.plot(x, y) \nplt.show() ", "_____no_output_____" ], [ "#Subplot()\n# Compute the x and y coordinates for points on sine and cosine curves \nx = np.arange(0, 5 * np.pi, 0.1) \ny_sin = np.sin(x) \ny_cos = np.cos(x) \n \n# Set up a subplot grid that has height 2 and width 1, \n# and set the first such subplot as active. \nplt.subplot(2, 1, 1)\n \n# Make the first plot \nplt.plot(x, y_sin,'r--') \nplt.title('Sine') \n \n# Set the second subplot as active, and make the second plot. \nplt.subplot(2, 1, 2) \nplt.plot(x, y_cos,'g--') \nplt.title('Cosine') \n \n# Show the figure. 
\nplt.show()", "_____no_output_____" ], [ "## Bar plot\n\nx = [2,8,10] \ny = [11,16,9] \n\nx2 = [3,9,11] \ny2 = [6,15,7] \nplt.bar(x, y) \nplt.bar(x2, y2, color = 'g') \nplt.title('Bar graph') \nplt.ylabel('Y axis') \nplt.xlabel('X axis') \n\nplt.show()", "_____no_output_____" ] ], [ [ "## Histograms", "_____no_output_____" ] ], [ [ "a = np.array([22,87,5,43,56,73,55,54,11,20,51,5,79,31,27]) \nplt.hist(a) \nplt.title(\"histogram\") \nplt.show()", "_____no_output_____" ] ], [ [ "## Box Plot using Matplotlib", "_____no_output_____" ] ], [ [ "data = [np.random.normal(0, std, 100) for std in range(1, 4)]\n\n# rectangular box plot\nplt.boxplot(data,vert=True,patch_artist=False); ", "_____no_output_____" ], [ "data", "_____no_output_____" ] ], [ [ "## Pie Chart", "_____no_output_____" ] ], [ [ "# Data to plot\nlabels = 'Python', 'C++', 'Ruby', 'Java'\nsizes = [215, 130, 245, 210]\ncolors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue']\nexplode = (0.4, 0, 0, 0) # explode 1st slice\n\n# Plot\nplt.pie(sizes, explode=explode, labels=labels, colors=colors,\nautopct='%1.1f%%', shadow=False)\n\nplt.axis('equal')\nplt.show()", "_____no_output_____" ] ] ]
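As a supplement to the pyplot state-machine style used throughout the notebook above, here is a hedged sketch of the equivalent object-oriented interface; the toy data mirrors the earlier x, y = x*x example, and the figure size and titles are arbitrary choices:

```python
import numpy as np
import matplotlib.pyplot as plt

x = np.arange(0, 10)
y = x * x

# Create the Figure and both Axes up front, then draw on each Axes explicitly
fig, axes = plt.subplots(1, 2, figsize=(8, 3))
axes[0].plot(x, y, 'r--')
axes[0].set_title('line plot')
axes[1].scatter(x, y, c='g')
axes[1].set_title('scatter plot')
fig.tight_layout()
plt.show()
```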
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
c52f98bc6f62601f71039ef7541d9d47016b68ae
137679
ipynb
Jupyter Notebook
Chapter10/Word_vector_generation.ipynb
tongni1975/Neural-Networks-with-Keras-Cookbook
d240252774e7f605106a37e3f27657e9e679837d
[ "MIT" ]
2
2019-04-22T07:31:50.000Z
2020-11-27T08:07:18.000Z
Chapter10/Word_vector_generation.ipynb
UsmanChatha/Neural-Networks-with-Keras-Cookbook
d240252774e7f605106a37e3f27657e9e679837d
[ "MIT" ]
null
null
null
Chapter10/Word_vector_generation.ipynb
UsmanChatha/Neural-Networks-with-Keras-Cookbook
d240252774e7f605106a37e3f27657e9e679837d
[ "MIT" ]
4
2019-03-24T03:44:06.000Z
2020-09-18T15:30:24.000Z
48.632639
238
0.314296
[ [ [ "import numpy as np", "_____no_output_____" ], [ "docs = [\"I enjoy playing TT\", \"I like playing TT\"]", "_____no_output_____" ], [ "docs[0][0].split()", "_____no_output_____" ], [ "from sklearn.feature_extraction.text import CountVectorizer\nvectorizer = CountVectorizer(min_df=0, token_pattern=r\"\\b\\w+\\b\")\nvectorizer.fit(docs)\n\nprint(vectorizer.vocabulary_)\n# encode document\nvector = vectorizer.transform(docs)\n# summarize encoded vector\nprint(vector.shape)\nprint(type(vector))\nprint(vector.toarray())", "{'i': 1, 'enjoy': 0, 'playing': 3, 'tt': 4, 'like': 2}\n(2, 5)\n<class 'scipy.sparse.csr.csr_matrix'>\n[[1 1 0 1 1]\n [0 1 1 1 1]]\n" ], [ "print(vectorizer.vocabulary_)\nprint(vector.shape)\nprint(vector.toarray())", "{'i': 1, 'enjoy': 0, 'playing': 3, 'tt': 4, 'like': 2}\n(2, 5)\n[[1 1 0 1 1]\n [0 1 1 1 1]]\n" ], [ "x = []\ny = []\nfor i in range(len(docs)):\n for j in range(len(docs[i].split())):\n t_x = []\n t_y = []\n for k in range(4):\n if(j==k):\n t_y.append(docs[i].split()[k])\n continue\n else:\n t_x.append(docs[i].split()[k])\n x.append(t_x)\n y.append(t_y)", "_____no_output_____" ], [ "x", "_____no_output_____" ], [ "y", "_____no_output_____" ], [ "x2 = []\ny2 = []\nfor i in range(len(x)):\n x2.append(' '.join(x[i]))\n y2.append(' '.join(y[i]))", "_____no_output_____" ], [ "x2", "_____no_output_____" ], [ "y2", "_____no_output_____" ], [ "vector_x = vectorizer.transform(x2)\nvector_x.toarray()", "_____no_output_____" ], [ "vector_y = vectorizer.transform(y2)\nvector_y.toarray()", "_____no_output_____" ], [ "from keras.models import Sequential\nfrom keras.layers import Dense, Embedding\nfrom keras.layers import LSTM , Bidirectional,Dropout\nfrom keras import backend as K\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras import regularizers", "Using TensorFlow backend.\n" ], [ "model = Sequential()\nmodel.add(Dense(3, activation='linear', input_shape=(5,)))\nmodel.add(Dense(5,activation='sigmoid'))\nmodel.summary()", "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nColocations handled automatically by placer.\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_1 (Dense) (None, 3) 18 \n_________________________________________________________________\ndense_2 (Dense) (None, 5) 20 \n=================================================================\nTotal params: 38\nTrainable params: 38\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "model.compile(loss='binary_crossentropy',optimizer='adam')", "_____no_output_____" ], [ "model.fit(vector_x, vector_y, epochs=1000, batch_size=4,verbose=1)", "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.cast instead.\nEpoch 1/1000\n8/8 [==============================] - 1s 107ms/step - loss: 0.9742\nEpoch 2/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.9688\nEpoch 3/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.9642\nEpoch 4/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.9593\nEpoch 
5/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.9546\nEpoch 6/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.9501\nEpoch 7/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.9452\n[... epochs 8-774 omitted: the per-epoch log repeats identically, with the loss decreasing steadily from 0.9404 to 0.2542 ...]\nEpoch 775/1000\n8/8 
[==============================] - 0s 2ms/step - loss: 0.2540\nEpoch 776/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2537\nEpoch 777/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2535\nEpoch 778/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2532\nEpoch 779/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2529\nEpoch 780/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2527\nEpoch 781/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2524\nEpoch 782/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2522\nEpoch 783/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2519\nEpoch 784/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2517\nEpoch 785/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2514\nEpoch 786/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2512\nEpoch 787/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2509\nEpoch 788/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2506\nEpoch 789/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2504\nEpoch 790/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2501\nEpoch 791/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2498\nEpoch 792/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2496\nEpoch 793/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2494\nEpoch 794/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2491\nEpoch 795/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2489\nEpoch 796/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2486\nEpoch 797/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2484\nEpoch 798/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2481\nEpoch 799/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2479\nEpoch 800/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2476\nEpoch 801/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2473\nEpoch 802/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2471\nEpoch 803/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2468\nEpoch 804/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2466\nEpoch 805/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2464\nEpoch 806/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2461\nEpoch 807/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2458\nEpoch 808/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2456\nEpoch 809/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2453\nEpoch 810/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2451\nEpoch 811/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2449\nEpoch 812/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2446\nEpoch 813/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2444\nEpoch 814/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2442\nEpoch 815/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2439\nEpoch 816/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2437\nEpoch 817/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2434\nEpoch 
818/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2432\nEpoch 819/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2429\nEpoch 820/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2427\nEpoch 821/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2424\nEpoch 822/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2422\nEpoch 823/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2420\nEpoch 824/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2417\nEpoch 825/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2415\nEpoch 826/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2412\nEpoch 827/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2410\nEpoch 828/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2408\nEpoch 829/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2405\nEpoch 830/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2403\nEpoch 831/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2401\nEpoch 832/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2398\nEpoch 833/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2396\nEpoch 834/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2393\nEpoch 835/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2391\nEpoch 836/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2389\nEpoch 837/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2386\nEpoch 838/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2384\nEpoch 839/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2382\nEpoch 840/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2379\nEpoch 841/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2377\nEpoch 842/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2375\nEpoch 843/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2372\nEpoch 844/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2370\nEpoch 845/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2368\nEpoch 846/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2366\nEpoch 847/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2363\nEpoch 848/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2361\nEpoch 849/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2359\nEpoch 850/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2356\nEpoch 851/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2354\nEpoch 852/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2352\nEpoch 853/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2350\nEpoch 854/1000\n8/8 [==============================] - 0s 3ms/step - loss: 0.2347\nEpoch 855/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2345\nEpoch 856/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2343\nEpoch 857/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2341\nEpoch 858/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2338\nEpoch 859/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2336\nEpoch 860/1000\n8/8 [==============================] - 0s 2ms/step - loss: 
0.2334\nEpoch 861/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2332\nEpoch 862/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2329\nEpoch 863/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2327\nEpoch 864/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2325\nEpoch 865/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2323\nEpoch 866/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2321\nEpoch 867/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2318\nEpoch 868/1000\n8/8 [==============================] - 0s 1ms/step - loss: 0.2316\nEpoch 869/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2314\nEpoch 870/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2312\nEpoch 871/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2309\nEpoch 872/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2307\nEpoch 873/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2305\nEpoch 874/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2303\nEpoch 875/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2301\nEpoch 876/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2299\nEpoch 877/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2296\nEpoch 878/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2294\nEpoch 879/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2292\nEpoch 880/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2290\nEpoch 881/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2288\nEpoch 882/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2286\nEpoch 883/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2283\nEpoch 884/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2281\nEpoch 885/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2279\nEpoch 886/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2277\nEpoch 887/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2275\nEpoch 888/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2273\nEpoch 889/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2271\nEpoch 890/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2269\nEpoch 891/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2266\nEpoch 892/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2264\nEpoch 893/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2262\nEpoch 894/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2260\nEpoch 895/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2258\nEpoch 896/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2256\nEpoch 897/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2254\nEpoch 898/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2252\nEpoch 899/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2250\nEpoch 900/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2248\nEpoch 901/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2246\nEpoch 902/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2244\nEpoch 903/1000\n8/8 [==============================] - 0s 
2ms/step - loss: 0.2242\nEpoch 904/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2239\nEpoch 905/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2238\nEpoch 906/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2235\nEpoch 907/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2233\nEpoch 908/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2231\nEpoch 909/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2229\nEpoch 910/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2227\nEpoch 911/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2225\nEpoch 912/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2223\nEpoch 913/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2221\nEpoch 914/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2219\nEpoch 915/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2217\nEpoch 916/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2215\nEpoch 917/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2213\nEpoch 918/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2211\nEpoch 919/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2209\nEpoch 920/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2207\nEpoch 921/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2205\nEpoch 922/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2203\nEpoch 923/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2201\nEpoch 924/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2199\nEpoch 925/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2197\nEpoch 926/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2195\nEpoch 927/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2193\nEpoch 928/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2191\nEpoch 929/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2190\nEpoch 930/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2188\nEpoch 931/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2186\nEpoch 932/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2184\nEpoch 933/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2182\nEpoch 934/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2180\nEpoch 935/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2178\nEpoch 936/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2176\nEpoch 937/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2174\nEpoch 938/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2172\nEpoch 939/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2170\nEpoch 940/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2168\nEpoch 941/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2166\nEpoch 942/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2165\nEpoch 943/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2163\nEpoch 944/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2161\nEpoch 945/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2159\nEpoch 946/1000\n8/8 
[==============================] - 0s 2ms/step - loss: 0.2157\nEpoch 947/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2155\nEpoch 948/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2153\nEpoch 949/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2151\nEpoch 950/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2150\nEpoch 951/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2148\nEpoch 952/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2146\nEpoch 953/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2144\nEpoch 954/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2142\nEpoch 955/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2141\nEpoch 956/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2139\nEpoch 957/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2137\nEpoch 958/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2135\nEpoch 959/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2133\nEpoch 960/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2131\nEpoch 961/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2129\nEpoch 962/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2128\nEpoch 963/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2126\nEpoch 964/1000\n8/8 [==============================] - 0s 1ms/step - loss: 0.2124\nEpoch 965/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2122\nEpoch 966/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2120\nEpoch 967/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2119\nEpoch 968/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2117\nEpoch 969/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2115\nEpoch 970/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2113\nEpoch 971/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2112\nEpoch 972/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2110\nEpoch 973/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2108\nEpoch 974/1000\n8/8 [==============================] - 0s 3ms/step - loss: 0.2106\nEpoch 975/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2105\nEpoch 976/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2103\nEpoch 977/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2101\nEpoch 978/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2099\nEpoch 979/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2098\nEpoch 980/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2096\nEpoch 981/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2094\nEpoch 982/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2092\nEpoch 983/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2091\nEpoch 984/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2089\nEpoch 985/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2087\nEpoch 986/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2085\nEpoch 987/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2084\nEpoch 988/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2082\nEpoch 
989/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2080\nEpoch 990/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2079\nEpoch 991/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2077\nEpoch 992/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2075\nEpoch 993/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2074\nEpoch 994/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2072\nEpoch 995/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2070\nEpoch 996/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2069\nEpoch 997/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2067\nEpoch 998/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2065\nEpoch 999/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2063\nEpoch 1000/1000\n8/8 [==============================] - 0s 2ms/step - loss: 0.2062\n" ], [ "model.predict(vector_x)", "_____no_output_____" ], [ "[list(vectorizer.vocabulary_.keys())[0]]", "_____no_output_____" ], [ "vectorizer.transform([list(vectorizer.vocabulary_.keys())[1]]).toarray()", "_____no_output_____" ], [ "model.summary()", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_1 (Dense) (None, 3) 18 \n_________________________________________________________________\ndense_2 (Dense) (None, 5) 20 \n=================================================================\nTotal params: 38\nTrainable params: 38\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "from keras.models import Model\nlayer_name = 'dense_1'\nintermediate_layer_model = Model(inputs=model.input,\n outputs=model.get_layer(layer_name).output)", "_____no_output_____" ], [ "for i in range(len(vectorizer.vocabulary_)):\n word = list(vectorizer.vocabulary_.keys())[i]\n word_vec = vectorizer.transform([list(vectorizer.vocabulary_.keys())[i]]).toarray()\n print(word, '\\t', intermediate_layer_model.predict(word_vec))", "i \t [[ 0.19378224 -0.6309615 1.8917809 ]]\nenjoy \t [[-0.7392523 -1.0391831 0.5767987]]\nplaying \t [[-0.8518311 -1.8652351 -0.83392346]]\ntt \t [[-1.313276 1.1554667 0.58792263]]\nlike \t [[-0.9066584 -0.8294611 0.663727 ]]\n" ] ], [ [ "# Measuring similarity between word vectors", "_____no_output_____" ] ], [ [ "a = word = list(vectorizer.vocabulary_.keys())[1]\nword_vec_a = intermediate_layer_model.predict(vectorizer.transform([list(vectorizer.vocabulary_.keys())[1]]).toarray())\n\nb = word = list(vectorizer.vocabulary_.keys())[4]\nword_vec_b = intermediate_layer_model.predict(vectorizer.transform([list(vectorizer.vocabulary_.keys())[4]]).toarray())", "_____no_output_____" ], [ "word_vec_a", "_____no_output_____" ], [ "np.sum(word_vec_a*word_vec_b)/((np.sqrt(np.sum(np.square(word_vec_a))))*np.sqrt(np.sum(np.square(word_vec_b))))", "_____no_output_____" ], [ "np.sum(np.square(word_vec_a - word_vec_b))", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
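The word-embedding notebook above ends by comparing two of the learned 3-dimensional word vectors with an inline cosine expression and a sum of squared differences. As a sketch only, with helper names that are mine rather than the notebook author's, the two metrics can be wrapped in small numpy functions and applied to the vectors the notebook printed for "enjoy" and "like":

```python
import numpy as np

def cosine_similarity(u, v):
    # dot product divided by the product of the two vector norms
    u, v = np.ravel(u), np.ravel(v)
    return float(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)))

def squared_euclidean_distance(u, v):
    # the other quantity the notebook evaluates: sum of squared differences
    return float(np.sum(np.square(np.ravel(u) - np.ravel(v))))

# 3-dimensional vectors printed by the intermediate-layer model above
enjoy = np.array([-0.7392523, -1.0391831, 0.5767987])
like = np.array([-0.9066584, -0.8294611, 0.663727])

print(cosine_similarity(enjoy, like))           # close to 1: the words share contexts
print(squared_euclidean_distance(enjoy, like))  # small: the points lie close together
```

With a five-word vocabulary the absolute values mean little, but the same two functions apply to any pair of rows taken from the intermediate-layer output.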
c52fd34094910c135cdcfd98ad32e3958496603b
19,389
ipynb
Jupyter Notebook
Listas/MNE_L04_VINICIUS_CANTUARIA_14_0165169.ipynb
cantuariavc/metodos-numericos-para-engenharia
c1416efb152fd64c770c2a561b746fa189b4522d
[ "MIT" ]
null
null
null
Listas/MNE_L04_VINICIUS_CANTUARIA_14_0165169.ipynb
cantuariavc/metodos-numericos-para-engenharia
c1416efb152fd64c770c2a561b746fa189b4522d
[ "MIT" ]
null
null
null
Listas/MNE_L04_VINICIUS_CANTUARIA_14_0165169.ipynb
cantuariavc/metodos-numericos-para-engenharia
c1416efb152fd64c770c2a561b746fa189b4522d
[ "MIT" ]
null
null
null
27.193548
526
0.492289
[ [ [ "# Lista de Exercícios 4\n\nMétodos Numéricos para Engenharia - Turma C \nNome: Vinícius de Castro Cantuária \nMatrícula: 14/0165169", "_____no_output_____" ], [ "Observações:\n\n0. A lista de exercícios deve ser entregue no moodle da disciplina.\n0. A lista de exercícios deve ser respondida neste único arquivo (.ipynb). Responda a cada questão na célula imediatamente abaixo do seu enunciado.\n0. Não se esqueça de alterar o nome do arquivo e o cabeçalho acima, colocando seu nome e matrícula.\n0. A lista é uma atividade avaliativa e individual. Não será tolerado qualquer tipo de plágio.", "_____no_output_____" ] ], [ [ "# Deixe-me incluir o conjunto de módulos do Python científico para você.\n%pylab inline", "Populating the interactive namespace from numpy and matplotlib\n" ] ], [ [ "---\n\n## Questão 01\n\nPara implementar o Método da Eliminação de Gauss, um método analítico para solucionar sistemas lineares, é preciso primeiro gerar uma matriz aumentada, resultado da concatenação da matriz de coeficientes com o vetor de termos independentes. Leia um valor `N`, uma matriz de coeficientes e um vetor de termos independentes de um sistema linear de tamanho `N` e imprima a matriz aumentada gerada.", "_____no_output_____" ] ], [ [ "N = int(input())\n\nA = np.zeros((N,N))\nfor i in range(N):\n A[i] = [float(x) for x in input().split()]\n\nb = np.array([float(x) for x in input().split()])\n\nG = np.hstack((A, b[:, None]))\nprint(G)", "1\n1\n1\n[[1. 1.]]\n" ] ], [ [ "---\n\n## Questão 02\n\nO Método da Eliminação de Gauss é um método analítico de resolução de sistemas lineares.\n\nDado o sistema linear $Ax = b$, onde:\n\n$$\nA =\n\\begin{bmatrix}\n13 & 7 & 3 \\\\\n5 & 19 & 1 \\\\\n2 & 11 & 23\n\\end{bmatrix},\\ b =\n\\begin{bmatrix}\n31 \\\\\n17 \\\\\n29\n\\end{bmatrix}\n$$\n\nEncontre a matriz aumentada e calcule a matriz triangular superior utilizando o pivoteamento parcial do Método da Eliminação de Gauss. Não utilize funções prontas, como \"`np.linalg.solve()`\", para realizar o cálculo. Este exercício não contém entradas.\n\n#### Saída Esperada\n\n```\n[[ 13. 7. 3. 31. ]\n [ 0. 16.30769231 -0.15384615 5.07692308]\n [ 0. 0. 22.63207547 21.14150943]]\n```", "_____no_output_____" ] ], [ [ "A = np.array([[13, 7, 3],\n [ 5, 19, 1],\n [ 2, 11, 23.0]])\nb = np.array([31, 17, 29.0])\n\nN = len(b)\nG = np.hstack((A, b[:, None]))\n\nfor i in range(N-1):\n for j in range(i+1, N):\n G[j,:] -= G[j,i] / G[i,i] * G[i,:]\n\nprint(G)", "[[13. 7. 3. 31. ]\n [ 0. 16.30769231 -0.15384615 5.07692308]\n [ 0. 0. 22.63207547 21.14150943]]\n" ] ], [ [ "---\n\n## Questão 03\n\nO Método da Eliminação de Gauss com pivoteamento completo, também chamado de Método de Gauss-Jordan, é um método analítico utilizado para encontrar a solução de sistemas lineares, mas também pode ser utilizado para para encontrar a inversa de matrizes quadradas não-singulares.\n\nDado o sistema linear $Ax = b$, onde:\n\n$$\nA =\n\\begin{bmatrix}\n13 & 7 & 3 \\\\\n5 & 19 & 1 \\\\\n2 & 11 & 23\n\\end{bmatrix},\\ b =\n\\begin{bmatrix}\n31 \\\\\n17 \\\\\n29\n\\end{bmatrix}\n$$\n\nUtilize o Método de Gauss-Jordan para encontrar o vetor solução do sistema linear acima. Não utilize funções prontas, como \"`np.linalg.solve()`\", para realizar o cálculo. 
Este exercício não contém entradas.\n\n#### Saída Esperada\n\n```\n[ 1.99666528 0.32013339 0.93413922]\n```", "_____no_output_____" ] ], [ [ "A = np.array([[13, 7, 3],\n [ 5, 19, 1],\n [ 2, 11, 23.0]])\nb = np.array([31, 17, 29.0])\n\nN = len(b)\nG = np.hstack((A, b[:, None]))\n\nfor i in range(N):\n G[i] = G[i] / G[i,i]\n for j in range(i+1, N):\n G[j,:] -= G[j,i] * G[i,:]\n\nfor i in range(N-1, -1, -1):\n for j in range(i-1, -1, -1):\n G[j,:] -= G[j,i] * G[i,:]\n\nprint(G[:,N])", "[1.99666528 0.32013339 0.93413922]\n" ] ], [ [ "---\n\n## Questão 04\n\nO Método de Gauss-Jordan também pode ser utilizado para encontrar a inversa de matrizes quadradas não-singulares. Para isso, é preciso concatenar à matriz de coeficientes, uma matriz identidade ao invés de o vetor de termos independentes.\n\nUtilizando a mesma matriz do exercício acima:\n\n$$\nA =\n\\begin{bmatrix}\n13 & 7 & 3 \\\\\n5 & 19 & 1 \\\\\n2 & 11 & 23\n\\end{bmatrix}\n$$\n\nCalcule $A^{-1}$ utilizando o método de Gauss-Jordan. Não utilize funções prontas para realizar o cálculo.\n\n#### Saída Esperada\n\n```\n[[ 0.08878699 -0.02667778 -0.01042101]\n [-0.02355148 0.06106711 0.00041684]\n [ 0.00354314 -0.0268862 0.04418508]]\n```", "_____no_output_____" ] ], [ [ "N = 3\nA = np.array([[13, 7, 3],\n [ 5, 19, 1],\n [ 2, 11, 23.0]])\n\nG = np.hstack((A, np.identity(N)))\n\nfor i in range(N):\n G[i] = G[i] / G[i,i]\n for j in range(i+1, N):\n G[j,:] -= G[j,i] * G[i,:]\n\nfor i in range(N-1, -1, -1):\n for j in range(i-1, -1, -1):\n G[j,:] -= G[j,i] * G[i,:]\n\nprint(G[:,N:])", "[[ 0.08878699 -0.02667778 -0.01042101]\n [-0.02355148 0.06106711 0.00041684]\n [ 0.00354314 -0.0268862 0.04418508]]\n" ] ], [ [ "---\n\n## Questão 05\n\nO Método de Jacobi é um método utilizado para encontrar a solução de sistemas lineares de forma numérica, ou seja, por aproximações sucessivas. Uma condição suficiente para que a solução aproximada convirja para a solução correta do sistema linear é: a matriz de coeficientes do sistema linear deve ser estritamente diagonal dominante. Uma matriz estritamente diagonal dominante satisfaz a seguinte equação:\n\n$$\n|a_{ii}| > \\sum_{j \\neq i}{|a_{ij}|}\n$$\n\nUtilizando a mesma matriz dos exercícios acima:\n\n$$\nA =\n\\begin{bmatrix}\n13 & 7 & 3 \\\\\n5 & 19 & 1 \\\\\n2 & 11 & 23\n\\end{bmatrix}\n$$\n\nVerifique se ela é uma matriz estritamente diagonal dominante.\n\n#### Saída Esperada\n\nA matriz \"A\" é uma matriz estritamente diagonal dominante.", "_____no_output_____" ], [ "## N = 3\nA = np.array([[13, 7, 3],\n [ 5, 19, 1],\n [ 2, 11, 23.0]])\n\nD = np.zeros(N)\nR = np.zeros(N)\nfor i in range(N):\n D[i] = abs(A[i,i])\n for j in range(N):\n R[i] += abs(A[i,j]) if i != j else 0\n\nis_diag_dom = True\nfor i in range(N):\n if D[i] <= R[i]:\n is_diag_dom = False\n break\n\nif is_diag_dom:\n print('A matriz \"A\" é uma matriz estritamente diagonal dominante.')\nelse:\n print('A matriz \"A\" não é uma matriz estritamente diagonal dominante.')", "_____no_output_____" ], [ "---\n\n## Questão 06\n\nO Método de Jacobi é um método utilizado para encontrar a solução de sistemas lineares de forma numérica, ou seja, por aproximações sucessivas. O método funciona da seguinte forma: dado um sistema linear $Ax = b$, divimos a matriz de coeficientes $A$ em $D+R$, onde $D$ é a matriz diagonal formada pela diagonal principal da matriz $A$, e $R$ é a matriz de resíduos, que é a matriz $A$ com todos os elementos da diagonal principal iguais a zero. 
A próxima aproximação para $x$, é calculada pela equação:\n\n$$\nx^{(novo)} = D^{-1}(b - Rx^{(antigo)})\n$$\n\nUtilizando o mesmo sistema linear $Ax = b$ dos exercícios anteriores:\n\n$$\nA =\n\\begin{bmatrix}\n13 & 7 & 3 \\\\\n5 & 19 & 1 \\\\\n2 & 11 & 23\n\\end{bmatrix}, b =\n\\begin{bmatrix}\n31 \\\\\n17 \\\\\n29\n\\end{bmatrix}\n$$\n\nIniciando pela aproximação `x = np.zeros(3)`, encontre as 10 (dez) primeiras aproximações da solução do sistema linear acima, utilizando o Método de Jacobi. Não utilize funções prontas na implementação deste exercício. Este exercício não contém entradas.\n\n#### Saída Esperada\n\n```\n[ 0. 0. 0.]\n[ 2.38461538 0.89473684 1.26086957]\n[ 1.61186411 0.20084492 0.62559409]\n[ 2.13210025 0.43763607 1.0246512 ]\n[ 1.91250722 0.27972882 0.86616533]\n[ 2.03410787 0.34585782 0.96078124]\n[ 1.9766655 0.30887786 0.91858036]\n[ 2.00631645 0.32621537 0.94126141]\n[ 1.99174678 0.31721875 0.93039122]\n[ 1.99909962 0.32162499 0.93596088]\n```", "_____no_output_____" ] ], [ [ "A = np.array([[13, 7, 3],\n [ 5, 19, 1],\n [ 2, 11, 23.0]])\nb = np.array([31, 17, 29.0])\n\nN = len(b)\nx = np.zeros(N)\n\nD = np.zeros(N)\nR = np.zeros((N,N))\nfor i in range(N):\n for j in range(N):\n R[i,j] = A[i,j]\n\nfor i in range(N):\n D[i] = 1.0 / A[i,i]\n R[i,i] = 0\n\nx_next = D * (b - R.dot(x[:, None])[:,0])\nfor i in range(10):\n print(x)\n x = x_next\n x_next = D * (b - R.dot(x[:, None])[:,0])", "[0. 0. 0.]\n[2.38461538 0.89473684 1.26086957]\n[1.61186411 0.20084492 0.62559409]\n[2.13210025 0.43763607 1.0246512 ]\n[1.91250722 0.27972882 0.86616533]\n[2.03410787 0.34585782 0.96078124]\n[1.9766655 0.30887786 0.91858036]\n[2.00631645 0.32621537 0.94126141]\n[1.99174678 0.31721875 0.93039122]\n[1.99909962 0.32162499 0.93596088]\n" ] ], [ [ "---\n\n## Questão 07\n\nQuestões criadas para treinar a utilização da biblioteca \"numpy\" para manipulação de matrizes. Resolva os exercícios abaixo.", "_____no_output_____" ], [ "**7.1.** Declare uma matriz `A` de tamanho `3x4` utilizando a função `np.array()` com quaisquer valores e, em seguida, imprima-a.", "_____no_output_____" ] ], [ [ "A = np.array([[13, 7, 3, 4],\n [ 5, 19, 1, 4],\n [ 2, 11, 23.0, 6]])\nprint(A)", "[[13. 7. 3. 4.]\n [ 5. 19. 1. 4.]\n [ 2. 11. 23. 6.]]\n" ] ], [ [ "**7.2.** Utilizando a matriz definida acima, agora imprima sua transposta.", "_____no_output_____" ] ], [ [ "print(A.T)", "[[13. 5. 2.]\n [ 7. 19. 11.]\n [ 3. 1. 23.]\n [ 4. 4. 6.]]\n" ] ], [ [ "**7.3.** Imprima toda a segunda linha da matriz $A$ original definida no item 7.1.", "_____no_output_____" ] ], [ [ "print(A[1,:])", "[ 5. 19. 1. 4.]\n" ] ], [ [ "**7.4.** Imprima a segunda e a terceira colunas da segunda linha da matriz transposta $A^T$ gerada no item 7.2.", "_____no_output_____" ] ], [ [ "print(A.T[1,1:3])", "[19. 11.]\n" ] ], [ [ "**7.5.** Imprima o seguinte produto matricial: $AA^T$.", "_____no_output_____" ] ], [ [ "print(A.dot(A.T))", "[[243. 217. 196.]\n [217. 403. 266.]\n [196. 266. 690.]]\n" ] ], [ [ "---\n\n## Questão 08\n\nO Método de Gauss-Seidel é um método numérico utilizado para encontrar a solução de sistemas lineares. O método funciona da seguinte forma: dado um sistema linear $Ax = b$, divimos a matriz de coeficientes $A$ em $L_*+U$, onde $L_*$ é uma matriz triangular inferior formada pelos elementos da diagonal principal e abaixo da diagonal principal da matriz $A$, e $U$ é uma matriz triangular superior formada pelos elementos estritamente acima da diagonal princiapl da matriz $A$. 
$x$ é calculado iterativamente da forma:\n\n$$\nx^{(k+1)} = L_*^{-1}(b - Ux^{(k)})\n$$\n\nUtilizando o mesmo sistema linear abaixo:\n\n$$\nA =\n\\begin{bmatrix}\n13 & 7 & 3 \\\\\n5 & 19 & 1 \\\\\n2 & 11 & 23\n\\end{bmatrix},\\ b =\n\\begin{bmatrix}\n31 \\\\\n17 \\\\\n29\n\\end{bmatrix}\n$$\n\nIniciando pela aproximação `x = np.zeros(3)`, encontre as 10 (dez) primeiras aproximações da solução do sistema linear, utilizando o Método de Gauss-Seidel. Não utilize funções prontas na implementação deste exercício. Este exercício não contém entradas.\n\n#### Saída Esperada\n\n```\n[ 0. 0. 0.]\n[ 2.38461538 0.89473684 1.26086957]\n[ 1.61186411 0.20084492 0.62559409]\n[ 2.13210025 0.43763607 1.0246512 ]\n[ 1.91250722 0.27972882 0.86616533]\n[ 2.03410787 0.34585782 0.96078124]\n[ 1.9766655 0.30887786 0.91858036]\n[ 2.00631645 0.32621537 0.94126141]\n[ 1.99174678 0.31721875 0.93039122]\n[ 1.99909962 0.32162499 0.93596088]\n```", "_____no_output_____" ], [ "---\n\n## Questão 09\n\nNa mesma linha das questões 06 e 08, utilize os Métodos de Jacobi e Gauss-Seidel para encontrar a solução do sistema abaixo:\n\n$$\nA =\n\\begin{bmatrix}\n13 & 7 & 3 \\\\\n5 & 19 & 1 \\\\\n2 & 11 & 23\n\\end{bmatrix},\\ b =\n\\begin{bmatrix}\n31 \\\\\n17 \\\\\n29\n\\end{bmatrix}\n$$\n\nIniciando pela aproximação `x = np.zeros(3)`. Mostre a primeira aproximação $x^k$ tal que nenhuma das diferenças absolutas $|x_i^k - x_i^{k-1}|$ sejam maiores que $10^{-8}$. Mostre também a quantidade de iterações necessárias e o vetor de resíduos ($e = Ax^k - b$) encontrado, para ambos os métodos.\n\n#### Saída Esperada\n\n```\nMétodo de Jacobi: 27 iterações\nx = [1.99666527 0.32013339 0.93413922]\ne = [-9.07208886e-08 -8.03053837e-08 -1.20908751e-07] \n\nMétodo de Gauss-Seidel: 9 iterações\nx = [1.99666528 0.32013339 0.93413922]\ne = [ 4.54047218e-08 -1.17981358e-09 0.00000000e+00]\n```", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
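Questions 08 and 09 of the numerical-methods homework above state the Gauss-Seidel update x^(k+1) = L_*^{-1}(b - U x^(k)) and ask for an iteration that stops once no component changes by more than 1e-8, but the stored notebook contains no solution cells for them. A minimal component-wise sketch, written to respect the exercise's "no ready-made solver" rule and offered only as an illustration rather than the author's answer, could look like this:

```python
import numpy as np

A = np.array([[13., 7., 3.],
              [5., 19., 1.],
              [2., 11., 23.]])
b = np.array([31., 17., 29.])

x = np.zeros(3)
for k in range(1, 1001):
    x_old = x.copy()
    for i in range(len(b)):
        # entries j < i already hold this sweep's values, entries j > i the previous sweep's
        s_new = A[i, :i] @ x[:i]
        s_old = A[i, i + 1:] @ x_old[i + 1:]
        x[i] = (b[i] - s_new - s_old) / A[i, i]
    if np.max(np.abs(x - x_old)) < 1e-8:  # stopping rule asked for in Question 09
        break

print(k)          # number of sweeps used
print(x)          # approximate solution
print(A @ x - b)  # residual vector, as Question 09 requests
```

Because the matrix is strictly diagonally dominant, which Question 05 already verifies, the sweep converges; each component is reused as soon as it is updated, which is why Gauss-Seidel needs far fewer iterations here than the Jacobi loop answered in Question 06.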
c52fd745be8b60cbcc329eac2f4acf46acc88543
358,199
ipynb
Jupyter Notebook
homework3/109598033_109598001_hw3_final.ipynb
maskerTim/bdm2021f
f2ae406462829dd6f4fb59c1f8ebc15f0f739b02
[ "Apache-2.0" ]
null
null
null
homework3/109598033_109598001_hw3_final.ipynb
maskerTim/bdm2021f
f2ae406462829dd6f4fb59c1f8ebc15f0f739b02
[ "Apache-2.0" ]
null
null
null
homework3/109598033_109598001_hw3_final.ipynb
maskerTim/bdm2021f
f2ae406462829dd6f4fb59c1f8ebc15f0f739b02
[ "Apache-2.0" ]
null
null
null
16.940128
2,815
0.329621
[ [ [ "import numpy as np\nfrom pyspark import SparkConf, SparkContext\nfrom pyspark.sql import SparkSession\nimport time\nimport re", "_____no_output_____" ], [ "spark=SparkSession.builder\\\n .config(\"spark.debug.maxToStringFields\", 100000)\\\n .config(\"spark.local.dir\", '/home/osboxes/hw/hw3/')\\\n .appName(\"hw3\")\\\n .getOrCreate()\nsc=spark.sparkContext", "Setting default log level to \"WARN\".\nTo adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).\n2021-12-02 08:35:52,167 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable\n2021-12-02 08:35:52,413 WARN spark.SparkConf: Note that spark.local.dir will be overridden by the value set by the cluster manager (via SPARK_LOCAL_DIRS in mesos/standalone/kubernetes and LOCAL_DIRS in YARN).\n" ], [ "path = \"/home/osboxes/hw/hw3/\"", "_____no_output_____" ], [ "k=3", "_____no_output_____" ], [ "data = sc.wholeTextFiles(\"file:\"+path+\"datasets/reut2-*\")", "_____no_output_____" ] ], [ [ "# Q1", "_____no_output_____" ] ], [ [ "#data = sc.wholeTextFiles('reut2-*')\narticles = data.map(lambda x:x[1]).flatMap(lambda x:x.split('<BODY>')[1:]).map(lambda x:x.split('</BODY>')[0])\\\n .map(lambda x:re.sub(' +', ' ', x.replace('\\n', ' ')))\n\narticles.take(3)", " \r" ], [ "k = 3\nshingles = articles.flatMap(lambda x:[x[i:i+k] for i in range(len(x)-k+1)]).distinct()\n\nshingles.take(5)\n", " \r" ], [ "shingles_count = shingles.count()\narticles_count = articles.count()\nprint(shingles_count, ': # of distinct shingles.')\nprint(articles_count, ': # of distinct articles.')", "[Stage 5:=============================> (1 + 1) / 2]\r" ], [ "articles = articles.collect()\nkShinglingMatrix = shingles.map(lambda s:[1 if s in a else 0 for a in articles])", " \r" ], [ "kShinglingMatrix.coalesce(1).saveAsTextFile(\"file:\"+path+'outputs/kshingling')", " \r" ] ], [ [ "# Q2", "_____no_output_____" ] ], [ [ "import random", "_____no_output_____" ], [ "def biggerThanNFirstPrime(N):\n p = 2\n while True:\n isPrime = True\n for i in range(2,p//2+1):\n if(p%i==0):\n isPrime = False\n break\n if isPrime and p > N:\n return p\n else:\n p+=1", "_____no_output_____" ], [ "h = 100\na = [random.randint(0, 100) for i in range(h)]\nb = [random.randint(0, 100) for i in range(h)]\np = biggerThanNFirstPrime(articles_count)\nN = articles_count\n\ndef rowHash(row, a, b, p, N):\n return ((a*row+b)%p)%N", "_____no_output_____" ], [ "kShinglesMatrixZipWithIndex = kShinglingMatrix.zipWithIndex().cache()", " \r" ], [ "minHashSignatures = []\nkShinglesMatrixZipWithIndex = kShinglingMatrix.zipWithIndex().cache()\nfor i in range(h):\n minHashSignatures.append(kShinglesMatrixZipWithIndex\\\n .map(lambda x:[rowHash(x[1], a[i], b[i], p ,N) if c == 1 else (articles_count + 10) for c in x[0]])\\\n .reduce(lambda x, y:[Mx if Mx < My else My for Mx, My in zip(x, y)]))\n print(str(i) + '\\n')", " \r" ], [ "count = 0\nwith open(path+'outputs/minHashSignatures.txt', 'w') as result:\n for row in minHashSignatures:\n result.write(str(row) + '\\n')\n print(count)\n count += 1", "0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n24\n25\n26\n27\n28\n29\n30\n31\n32\n33\n34\n35\n36\n37\n38\n39\n40\n41\n42\n43\n44\n45\n46\n47\n48\n49\n50\n51\n52\n53\n54\n55\n56\n57\n58\n59\n60\n61\n62\n63\n64\n65\n66\n67\n68\n69\n70\n71\n72\n73\n74\n75\n76\n77\n78\n79\n80\n81\n82\n83\n84\n85\n86\n87\n88\n89\n90\n91\n92\n93\n94\n95\n96\n97\n98\n99\n" ] ], [ [ "# Q3", 
"_____no_output_____" ] ], [ [ "import numpy as np\nfrom operator import add\nbands = 20\nr = 5\nsimilarRate = 0.8\nbuckets = articles_count\nhashFuct = [[random.randint(0, 100) for i in range(r + 1)] for j in range(bands)]\n\nwith open(path+'outputs/candidatePairs.txt', 'w') as result:\n for i in range(articles_count):#12\n candidatePairs = list()\n for j in range(bands):\n band = np.array(minHashSignatures[j*r:j*r+r]).T\n band = [(np.array(article).dot(np.array(hashFuct[j][:r])) + hashFuct[j][-1]) % buckets for article in band]\n for k, article in enumerate(band):\n if k > i and (article == band[i]).all():\n candidatePairs.append(k)\n candidatePairs = [(article, candidatePairs.count(article)) for article in set(candidatePairs)]\n candidatePairsTreshold = list()\n for candidatePair in candidatePairs:\n if candidatePair[1] >= bands*similarRate:\n candidatePairsTreshold.append(candidatePair[0])\n result.write('Articles' + str(i) + ':' + str(candidatePairsTreshold) + '\\n')\n print(str(i))", "0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n24\n25\n26\n27\n28\n29\n30\n31\n32\n33\n34\n35\n36\n37\n38\n39\n40\n41\n42\n43\n44\n45\n46\n47\n48\n49\n50\n51\n52\n53\n54\n55\n56\n57\n58\n59\n60\n61\n62\n63\n64\n65\n66\n67\n68\n69\n70\n71\n72\n73\n74\n75\n76\n77\n78\n79\n80\n81\n82\n83\n84\n85\n86\n87\n88\n89\n90\n91\n92\n93\n94\n95\n96\n97\n98\n99\n100\n101\n102\n103\n104\n105\n106\n107\n108\n109\n110\n111\n112\n113\n114\n115\n116\n117\n118\n119\n120\n121\n122\n123\n124\n125\n126\n127\n128\n129\n130\n131\n132\n133\n134\n135\n136\n137\n138\n139\n140\n141\n142\n143\n144\n145\n146\n147\n148\n149\n150\n151\n152\n153\n154\n155\n156\n157\n158\n159\n160\n161\n162\n163\n164\n165\n166\n167\n168\n169\n170\n171\n172\n173\n174\n175\n176\n177\n178\n179\n180\n181\n182\n183\n184\n185\n186\n187\n188\n189\n190\n191\n192\n193\n194\n195\n196\n197\n198\n199\n200\n201\n202\n203\n204\n205\n206\n207\n208\n209\n210\n211\n212\n213\n214\n215\n216\n217\n218\n219\n220\n221\n222\n223\n224\n225\n226\n227\n228\n229\n230\n231\n232\n233\n234\n235\n236\n237\n238\n239\n240\n241\n242\n243\n244\n245\n246\n247\n248\n249\n250\n251\n252\n253\n254\n255\n256\n257\n258\n259\n260\n261\n262\n263\n264\n265\n266\n267\n268\n269\n270\n271\n272\n273\n274\n275\n276\n277\n278\n279\n280\n281\n282\n283\n284\n285\n286\n287\n288\n289\n290\n291\n292\n293\n294\n295\n296\n297\n298\n299\n300\n301\n302\n303\n304\n305\n306\n307\n308\n309\n310\n311\n312\n313\n314\n315\n316\n317\n318\n319\n320\n321\n322\n323\n324\n325\n326\n327\n328\n329\n330\n331\n332\n333\n334\n335\n336\n337\n338\n339\n340\n341\n342\n343\n344\n345\n346\n347\n348\n349\n350\n351\n352\n353\n354\n355\n356\n357\n358\n359\n360\n361\n362\n363\n364\n365\n366\n367\n368\n369\n370\n371\n372\n373\n374\n375\n376\n377\n378\n379\n380\n381\n382\n383\n384\n385\n386\n387\n388\n389\n390\n391\n392\n393\n394\n395\n396\n397\n398\n399\n400\n401\n402\n403\n404\n405\n406\n407\n408\n409\n410\n411\n412\n413\n414\n415\n416\n417\n418\n419\n420\n421\n422\n423\n424\n425\n426\n427\n428\n429\n430\n431\n432\n433\n434\n435\n436\n437\n438\n439\n440\n441\n442\n443\n444\n445\n446\n447\n448\n449\n450\n451\n452\n453\n454\n455\n456\n457\n458\n459\n460\n461\n462\n463\n464\n465\n466\n467\n468\n469\n470\n471\n472\n473\n474\n475\n476\n477\n478\n479\n480\n481\n482\n483\n484\n485\n486\n487\n488\n489\n490\n491\n492\n493\n494\n495\n496\n497\n498\n499\n500\n501\n502\n503\n504\n505\n506\n507\n508\n509\n510\n511\n512\n513\n514\n515\n516\n517\n518\n519\n520\n521\n522\n523\n524\n525\n526\n527\n528\n529\n530\n531\
n532\n533\n[ ... the cell prints its loop index for every article; values 534 through 1859 elided ... ]\n1860\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
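The Q3 cell of the Spark homework above hashes b = 20 bands of r = 5 signature rows each and keeps a pair only when it collides in at least 80% of the bands. As a side note rather than part of the assignment, the textbook banding analysis treats a pair as a candidate as soon as a single band collides, which gives probability 1 - (1 - s^r)^b for a pair with Jaccard similarity s and a threshold of roughly (1/b)^(1/r):

```python
# Candidate probability under classic one-band LSH, for the b and r used in Q3.
b, r = 20, 5

for s in (0.2, 0.4, 0.6, 0.8, 0.9):
    p_band = s ** r                      # all r minhash rows of one band agree
    p_candidate = 1 - (1 - p_band) ** b  # at least one of the b bands collides
    print(f"s = {s:.1f}  ->  P(candidate) = {p_candidate:.4f}")

# Similarity level where the S-curve rises most steeply.
print("approximate threshold:", round((1 / b) ** (1 / r), 3))
```

Requiring 16 of the 20 bands to agree, as the cell does, pushes the effective threshold well above that 0.55 estimate, so the pairs it reports should be near-duplicate articles rather than merely similar ones.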
c52ff8729bdea53831ec61a8fe7213b2a2f58fa8
4,013
ipynb
Jupyter Notebook
entornos_virtuales/virtualenvs.ipynb
rauljrz/curso_python
f241125f0a51c39899f5d59537dca9e7b4c53489
[ "Apache-2.0" ]
null
null
null
entornos_virtuales/virtualenvs.ipynb
rauljrz/curso_python
f241125f0a51c39899f5d59537dca9e7b4c53489
[ "Apache-2.0" ]
null
null
null
entornos_virtuales/virtualenvs.ipynb
rauljrz/curso_python
f241125f0a51c39899f5d59537dca9e7b4c53489
[ "Apache-2.0" ]
null
null
null
23.331395
262
0.567406
[ [ [ "# Creación y uso de virtual environments", "_____no_output_____" ], [ "La mayoría de los paquetes del sistema se almacenan en un directorio secundario de la ruta almacenada en `sys.prefix`", "_____no_output_____" ] ], [ [ "import sys\n\nsys.prefix", "_____no_output_____" ] ], [ [ "Los paquetes de terceros instalados con easy_install o pip generalmente se colocan en uno de los directorios señalados por `site.getsitepackages`", "_____no_output_____" ] ], [ [ "import site\n\nsite.getsitepackages()", "_____no_output_____" ] ], [ [ "**Diferentes proyectos**\n\n¿Qué ocurre si necesitamos diferentes versiones de una misma librería?", "_____no_output_____" ], [ "Welcome:\n\n* conda\n`https://docs.anaconda.com/anaconda/install/`\n\nAnaconda (conda): virtual environment + librerías científicas \"bien compiladas\".", "_____no_output_____" ], [ "### Conda\n\n```sh\nconda create --name curso python=3.7\nconda install numpy\nconda install -c conda-forge jupyterlab\nconda list\npip install <...>\nconda env create -f environment.yml\nconda env remove --name curso\n```\n", "_____no_output_____" ], [ "**Cosas importantes a tener en cuenta:**\n\n* Conda permite crear environments con dependencias de otros lenguajes (C++, etc)\n* Conda puede dar [problemas](https://github.com/conda/conda/issues/6073) al crear un environment nuevo a partir de un archivo yml exportado, hay [una issue](https://github.com/conda/conda/issues/6073) en github al respecto todavía no resuelta.", "_____no_output_____" ], [ "### Otros managers a tener en cuenta:\n\n* [venv](https://docs.python.org/3/library/venv.html) + [pip-tools](https://github.com/jazzband/pip-tools) 🙋🏻‍♂️\n* [virtualenv](https://virtualenv.pypa.io/en/latest/) parecido a `venv`\n* [poetry](https://github.com/sdispater/poetry)\n* [virtualenvwrapper](https://virtualenvwrapper.readthedocs.io/en/latest/). Es un wrapper de virtualven (quien lo diría por el nombre). No tiene tantas features como los demás pero es muy sencillo de usar y puede ser útil en proyectos con baja complejidad.", "_____no_output_____" ], [ "Más información: https://realpython.com/effective-python-environment/", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
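The virtual-environments notebook above contrasts sys.prefix with the site-packages locations and then lists conda commands. A related check, included here only as an illustrative snippet and not taken from the notebook, is to ask the interpreter whether it is currently running inside a venv-style environment:

```python
import sys

# In a venv or virtualenv, sys.prefix points inside the environment while
# sys.base_prefix still points at the base interpreter it was created from.
base = getattr(sys, "base_prefix", sys.prefix)
inside_venv = sys.prefix != base

print("sys.prefix     :", sys.prefix)
print("sys.base_prefix:", base)
print("inside a venv? :", inside_venv)
```

A conda environment is a full interpreter installation, so both prefixes point at the environment and this particular test reports False there; inspecting the CONDA_PREFIX environment variable is the usual way to detect an activated conda environment.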
c52ffd7452913172e037730273d3fa61777bcfa3
257,264
ipynb
Jupyter Notebook
docs/tutorials/embedding/gsvd.ipynb
clbonet/scikit-network
0e4970cade8b79096d4f503412ecd842aebbe428
[ "BSD-3-Clause" ]
1
2020-09-14T11:06:13.000Z
2020-09-14T11:06:13.000Z
docs/tutorials/embedding/gsvd.ipynb
clbonet/scikit-network
0e4970cade8b79096d4f503412ecd842aebbe428
[ "BSD-3-Clause" ]
2
2020-10-17T08:21:38.000Z
2020-10-21T09:13:30.000Z
docs/tutorials/embedding/gsvd.ipynb
clbonet/scikit-network
0e4970cade8b79096d4f503412ecd842aebbe428
[ "BSD-3-Clause" ]
1
2020-06-19T09:39:11.000Z
2020-06-19T09:39:11.000Z
36.704808
48,616
0.550757
[ [ [ "# GSVD", "_____no_output_____" ], [ "This notebook illustrates the embedding of a graph through the [generalized singular value decomposition](https://en.wikipedia.org/wiki/Generalized_singular_value_decomposition) of the adjacency matrix. ", "_____no_output_____" ] ], [ [ "from IPython.display import SVG", "_____no_output_____" ], [ "import numpy as np", "_____no_output_____" ], [ "from sknetwork.data import karate_club, painters, movie_actor\nfrom sknetwork.embedding import GSVD, cosine_modularity\nfrom sknetwork.visualization import svg_graph, svg_digraph, svg_bigraph", "_____no_output_____" ] ], [ [ "## Graphs", "_____no_output_____" ] ], [ [ "graph = karate_club(metadata=True)\nadjacency = graph.adjacency\nlabels = graph.labels", "_____no_output_____" ] ], [ [ "**Embedding**", "_____no_output_____" ] ], [ [ "gsvd = GSVD(3, normalized=False)\nembedding = gsvd.fit_transform(adjacency)\nembedding.shape", "_____no_output_____" ], [ "# skip first component\nposition = embedding[:,1:]", "_____no_output_____" ], [ "image = svg_graph(adjacency, position, labels=labels)", "_____no_output_____" ], [ "SVG(image)", "/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/networkx/drawing/nx_pylab.py:402: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n if isinstance(alpha, collections.Iterable):\n/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/networkx/drawing/nx_pylab.py:579: MatplotlibDeprecationWarning: \nThe iterable function was deprecated in Matplotlib 3.1 and will be removed in 3.3. Use np.iterable instead.\n if not cb.iterable(width):\n" ] ], [ [ "**Metrics**", "_____no_output_____" ] ], [ [ "cosine_modularity(adjacency, embedding)", "_____no_output_____" ] ], [ [ "## Digraphs", "_____no_output_____" ] ], [ [ "graph = painters(metadata=True)\nadjacency = graph.adjacency\nposition = graph.position\nnames = graph.names", "_____no_output_____" ] ], [ [ "**Embedding**", "_____no_output_____" ] ], [ [ "gsvd = GSVD(3, normalized=False)\nembedding = gsvd.fit_transform(adjacency)\nembedding.shape", "_____no_output_____" ], [ "# skip first component\nposition = embedding[:,1:]", "_____no_output_____" ], [ "image = svg_digraph(adjacency, position, names=names)", "_____no_output_____" ], [ "SVG(image)", "/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/networkx/drawing/nx_pylab.py:402: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n if isinstance(alpha, collections.Iterable):\n/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/networkx/drawing/nx_pylab.py:579: MatplotlibDeprecationWarning: \nThe iterable function was deprecated in Matplotlib 3.1 and will be removed in 3.3. 
Use np.iterable instead.\n if not cb.iterable(width):\n" ] ], [ [ "**Metrics**", "_____no_output_____" ] ], [ [ "cosine_modularity(adjacency, embedding)", "_____no_output_____" ] ], [ [ "## Bigraphs", "_____no_output_____" ], [ "**Loading**", "_____no_output_____" ] ], [ [ "graph = movie_actor(metadata=True)\nbiadjacency = graph.biadjacency\nnames_row = graph.names_row\nnames_col = graph.names_col", "_____no_output_____" ] ], [ [ "**Embedding**", "_____no_output_____" ] ], [ [ "gsvd = GSVD(3, normalized=False)\ngsvd.fit(biadjacency)", "_____no_output_____" ], [ "embedding_row = gsvd.embedding_row_\nembedding_row.shape", "_____no_output_____" ], [ "embedding_col = gsvd.embedding_col_\nembedding_col.shape", "_____no_output_____" ], [ "# skip first component\nposition_row = embedding_row[:,1:]\nposition_col = embedding_col[:,1:]", "_____no_output_____" ], [ "image = svg_bigraph(biadjacency, names_row, names_col, \n position_row=embedding_row, position_col=embedding_col,\n color_row='blue', color_col='red')", "_____no_output_____" ], [ "SVG(image)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
c530058f5959b15e50abe9e97a5215c212161a1f
20,280
ipynb
Jupyter Notebook
notebooks/202-vision-superresolution/202-vision-superresolution-video.ipynb
SalnikovIgor/openvino_notebooks
d137fa74e5d6829e63b844b31cffaea7998bcc5b
[ "Apache-2.0" ]
2
2021-09-03T00:03:26.000Z
2021-09-03T00:03:31.000Z
notebooks/202-vision-superresolution/202-vision-superresolution-video.ipynb
raymondlo84/openvino_notebooks
990d22a7f9ae6ac0ec964f02b2eb357f7bb5d32f
[ "Apache-2.0" ]
null
null
null
notebooks/202-vision-superresolution/202-vision-superresolution-video.ipynb
raymondlo84/openvino_notebooks
990d22a7f9ae6ac0ec964f02b2eb357f7bb5d32f
[ "Apache-2.0" ]
null
null
null
36.149733
825
0.582002
[ [ [ "# Video Super Resolution with OpenVINO\nSuper Resolution is the process of enhancing the quality of an image by increasing the pixel count using deep learning. This notebook applies Single Image Super Resolution (SISR) to frames in a 360p (480×360) video in 360p resolution. We use a model called [single-image-super-resolution-1032](https://github.com/openvinotoolkit/open_model_zoo/tree/develop/models/intel/single-image-super-resolution-1032) which is available from the Open Model Zoo. It is based on the research paper cited below. \n\nY. Liu et al., [\"An Attention-Based Approach for Single Image Super Resolution,\"](https://arxiv.org/abs/1807.06779) 2018 24th International Conference on Pattern Recognition (ICPR), 2018, pp. 2777-2784, doi: 10.1109/ICPR.2018.8545760.\n\n**NOTE:** The Single Image Super Resolution (SISR) model used in this demo is not optimized for video. Results may vary depending on the video. We are looking for a more suitable Multi Image Super Resolution (MISR) model, so if you know of a great open source model, please let us know! You can start a [discussion](https://github.com/openvinotoolkit/openvino_notebooks/discussions) or create an [issue](https://github.com/openvinotoolkit/openvino_notebooks/issues) on GitHub. ", "_____no_output_____" ], [ "## Preparation", "_____no_output_____" ], [ "### Imports", "_____no_output_____" ] ], [ [ "import os\nimport time\nimport urllib\nfrom pathlib import Path\n\nimport cv2\nimport numpy as np\nfrom IPython.display import (\n HTML,\n FileLink,\n Pretty,\n ProgressBar,\n Video,\n clear_output,\n display,\n)\nfrom openvino.inference_engine import IECore\nfrom pytube import YouTube", "_____no_output_____" ] ], [ [ "### Settings", "_____no_output_____" ] ], [ [ "# Device to use for inference. 
For example, \"CPU\", or \"GPU\"\nDEVICE = \"CPU\"\n# 1032: 4x superresolution, 1033: 3x superresolution\nMODEL_FILE = \"model/single-image-super-resolution-1032.xml\"\nmodel_name = os.path.basename(MODEL_FILE)\nmodel_xml_path = Path(MODEL_FILE).with_suffix(\".xml\")", "_____no_output_____" ] ], [ [ "### Functions\n", "_____no_output_____" ] ], [ [ "def write_text_on_image(image: np.ndarray, text: str) -> np.ndarray:\n \"\"\"\n Write the specified text in the top left corner of the image\n as white text with a black border.\n\n :param image: image as numpy arry with HWC shape, RGB or BGR\n :param text: text to write\n :return: image with written text, as numpy array\n \"\"\"\n font = cv2.FONT_HERSHEY_PLAIN\n org = (20, 20)\n font_scale = 4\n font_color = (255, 255, 255)\n line_type = 1\n font_thickness = 2\n text_color_bg = (0, 0, 0)\n x, y = org\n\n image = cv2.UMat(image)\n (text_w, text_h), _ = cv2.getTextSize(\n text=text, fontFace=font, fontScale=font_scale, thickness=font_thickness\n )\n result_im = cv2.rectangle(\n img=image, pt1=org, pt2=(x + text_w, y + text_h), color=text_color_bg, thickness=-1\n )\n\n textim = cv2.putText(\n img=result_im,\n text=text,\n org=(x, y + text_h + font_scale - 1),\n fontFace=font,\n fontScale=font_scale,\n color=font_color,\n thickness=font_thickness,\n lineType=line_type,\n )\n return textim.get()\n\n\ndef load_image(path: str) -> np.ndarray:\n \"\"\"\n Loads an image from `path` and returns it as BGR numpy array.\n\n :param path: path to an image filename or url\n :return: image as numpy array, with BGR channel order\n \"\"\"\n if path.startswith(\"http\"):\n # Set User-Agent to Mozilla because some websites block requests\n # with User-Agent Python\n request = urllib.request.Request(url=path, headers={\"User-Agent\": \"Mozilla/5.0\"})\n response = urllib.request.urlopen(url=request)\n array = np.asarray(bytearray(response.read()), dtype=\"uint8\")\n image = cv2.imdecode(buf=array, flags=-1) # Loads the image as BGR\n else:\n image = cv2.imread(filename=path)\n return image\n\n\ndef convert_result_to_image(result) -> np.ndarray:\n \"\"\"\n Convert network result of floating point numbers to image with integer\n values from 0-255. Values outside this range are clipped to 0 and 255.\n\n :param result: a single superresolution network result in N,C,H,W shape\n \"\"\"\n result = result.squeeze(0).transpose(1, 2, 0)\n result *= 255\n result[result < 0] = 0\n result[result > 255] = 255\n result = result.astype(np.uint8)\n return result", "_____no_output_____" ] ], [ [ "## Load the Superresolution Model", "_____no_output_____" ], [ "Load the model in Inference Engine with `ie.read_network` and load it to the specified device with `ie.load_network`", "_____no_output_____" ] ], [ [ "ie = IECore()\nnet = ie.read_network(model=model_xml_path)\nexec_net = ie.load_network(network=net, device_name=DEVICE)", "_____no_output_____" ] ], [ [ "Get information about network inputs and outputs. The Super Resolution model expects two inputs: 1) the input image, 2) a bicubic interpolation of the input image to the target size 1920x1080. It returns the super resolution version of the image in 1920x1800. ", "_____no_output_____" ] ], [ [ "# Network inputs and outputs are dictionaries. Get the keys for the\n# dictionaries.\noriginal_image_key = list(exec_net.input_info)[0]\nbicubic_image_key = list(exec_net.input_info)[1]\noutput_key = list(exec_net.outputs.keys())[0]\n\n# Get the expected input and target shape. `.dims[2:]` returns the height\n# and width. 
OpenCV's resize function expects the shape as (width, height),\n# so we reverse the shape with `[::-1]` and convert it to a tuple\ninput_height, input_width = tuple(exec_net.input_info[original_image_key].tensor_desc.dims[2:])\ntarget_height, target_width = tuple(exec_net.input_info[bicubic_image_key].tensor_desc.dims[2:])\n\nupsample_factor = int(target_height / input_height)\n\nprint(f\"The network expects inputs with a width of {input_width}, \" f\"height of {input_height}\")\nprint(f\"The network returns images with a width of {target_width}, \" f\"height of {target_height}\")\n\nprint(\n f\"The image sides are upsampled by a factor {upsample_factor}. \"\n f\"The new image is {upsample_factor**2} times as large as the \"\n \"original image\"\n)", "_____no_output_____" ] ], [ [ "## Superresolution on Video\n\nDownload a YouTube\\* video with PyTube and enhance the video quality with superresolution. \n\nBy default only the first 100 frames of the video are processed. Change NUM_FRAMES in the cell below to modify this. \n\n**Note:**\n- The resulting video does not contain audio.\n- The input video should be a landscape video and have an an input resultion of 360p (640x360) for the 1032 model, or 480p (720x480) for the 1033 model.", "_____no_output_____" ], [ "### Settings", "_____no_output_____" ] ], [ [ "VIDEO_DIR = \"data\"\nOUTPUT_DIR = \"output\"\n\nos.makedirs(name=str(OUTPUT_DIR), exist_ok=True)\n# Number of frames to read from the input video. Set to 0 to read all frames.\nNUM_FRAMES = 100\n# The format for saving the result video's\n# vp09 is slow, but widely available. If you have FFMPEG installed, you can\n# change the FOURCC to `*\"THEO\"` to improve video writing speed\nFOURCC = cv2.VideoWriter_fourcc(*\"vp09\")", "_____no_output_____" ] ], [ [ "### Download and Prepare Video", "_____no_output_____" ] ], [ [ "# Use pytube to download a video. It downloads to the videos subdirectory.\n# You can also place a local video there and comment out the following lines\nVIDEO_URL = \"https://www.youtube.com/watch?v=V8yS3WIkOrA\"\nyt = YouTube(VIDEO_URL)\n# Use `yt.streams` to see all available streams. 
See the PyTube documentation\n# https://python-pytube.readthedocs.io/en/latest/api.html for advanced\n# filtering options\ntry:\n os.makedirs(name=VIDEO_DIR, exist_ok=True)\n stream = yt.streams.filter(resolution=\"360p\").first()\n filename = Path(stream.default_filename.encode(\"ascii\", \"ignore\").decode(\"ascii\")).stem\n stream.download(output_path=OUTPUT_DIR, filename=filename)\n print(f\"Video {filename} downloaded to {OUTPUT_DIR}\")\n\n # Create Path objects for the input video and the resulting videos\n video_path = Path(stream.get_file_path(filename, OUTPUT_DIR))\nexcept Exception:\n # If PyTube fails, use a local video stored in the VIDEO_DIR directory\n video_path = Path(rf\"{VIDEO_DIR}/CEO Pat Gelsinger on Leading Intel.mp4\")\n\n# Path names for the result videos\nsuperres_video_path = Path(f\"{OUTPUT_DIR}/{video_path.stem}_superres.mp4\")\nbicubic_video_path = Path(f\"{OUTPUT_DIR}/{video_path.stem}_bicubic.mp4\")\ncomparison_video_path = Path(f\"{OUTPUT_DIR}/{video_path.stem}_superres_comparison.mp4\")", "_____no_output_____" ], [ "# Open the video and get the dimensions and the FPS\ncap = cv2.VideoCapture(filename=str(video_path))\nret, image = cap.read()\nif not ret:\n raise ValueError(f\"The video at '{video_path}' cannot be read.\")\nfps = cap.get(cv2.CAP_PROP_FPS)\noriginal_frame_height, original_frame_width = image.shape[:2]\n\ncap.release()\nprint(\n f\"The input video has a frame width of {original_frame_width}, \"\n f\"frame height of {original_frame_height} and runs at {fps:.2f} fps\"\n)", "_____no_output_____" ] ], [ [ "Create superresolution video, bicubic video and comparison video. The superresolution video contains the enhanced video, upsampled with superresolution, the bicubic video is the input video upsampled with bicubic interpolation, the combination video sets the bicubic video and the superresolution side by side.", "_____no_output_____" ] ], [ [ "superres_video = cv2.VideoWriter(\n filename=str(superres_video_path),\n fourcc=FOURCC,\n fps=fps,\n frameSize=(target_width, target_height),\n)\nbicubic_video = cv2.VideoWriter(\n filename=str(bicubic_video_path),\n fourcc=FOURCC,\n fps=fps,\n frameSize=(target_width, target_height),\n)\ncomparison_video = cv2.VideoWriter(\n filename=str(comparison_video_path),\n fourcc=FOURCC,\n fps=fps,\n frameSize=(target_width * 2, target_height),\n)", "_____no_output_____" ] ], [ [ "### Do Inference\n\nRead video frames and enhance them with superresolution. Save the superresolution video, the bicubic video and the comparison video to file.\n\nThe code in this cell reads the video frame by frame. Each frame is resized and reshaped to network input shape and upsampled with bicubic interpolation to target shape. Both the original and the bicubic image are propagated through the network. The network result is a numpy array with floating point values, with a shape of (1,3,1920,1080). This array is converted to an 8-bit image with shape (1080,1920,3) and written to `superres_video`. The bicubic image is written to `bicubic_video` for comparison. Lastly, the bicubic and result frames are combined side by side and written to `comparison_video`. A progress bar shows the progress of the process. 
Inference time is measured, as well as total time to process each frame, which includes inference time as well as the time it takes to process and write the video.", "_____no_output_____" ] ], [ [ "start_time = time.perf_counter()\nframe_nr = 1\ntotal_inference_duration = 0\ntotal_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT) if NUM_FRAMES == 0 else NUM_FRAMES\n\nprogress_bar = ProgressBar(total=total_frames)\nprogress_bar.display()\n\n\ncap = cv2.VideoCapture(filename=str(video_path))\ntry:\n while cap.isOpened():\n ret, image = cap.read()\n if not ret:\n cap.release()\n break\n\n if NUM_FRAMES > 0 and frame_nr == NUM_FRAMES:\n break\n\n # Resize the input image to network shape and convert from (H,W,C) to\n # (N,C,H,W)\n resized_image = cv2.resize(src=image, dsize=(input_width, input_height))\n input_image_original = np.expand_dims(resized_image.transpose(2, 0, 1), axis=0)\n\n # Resize and reshape the image to the target shape with bicubic\n # interpolation\n bicubic_image = cv2.resize(\n src=image, dsize=(target_width, target_height), interpolation=cv2.INTER_CUBIC\n )\n input_image_bicubic = np.expand_dims(bicubic_image.transpose(2, 0, 1), axis=0)\n\n # Do inference\n inference_start_time = time.perf_counter()\n result = exec_net.infer(\n inputs={\n original_image_key: input_image_original,\n bicubic_image_key: input_image_bicubic,\n }\n )[output_key]\n inference_stop_time = time.perf_counter()\n inference_duration = inference_stop_time - inference_start_time\n total_inference_duration += inference_duration\n\n # Transform inference result into an image\n result_frame = convert_result_to_image(result=result)\n\n # Write resulting image and bicubic image to video\n superres_video.write(image=result_frame)\n bicubic_video.write(image=bicubic_image)\n stacked_frame = np.hstack((bicubic_image, result_frame))\n comparison_video.write(image=stacked_frame)\n\n frame_nr = frame_nr + 1\n\n # Update progress bar and status message\n progress_bar.progress = frame_nr\n progress_bar.update()\n if frame_nr % 10 == 0:\n clear_output(wait=True)\n progress_bar.display()\n display(\n Pretty(\n f\"Processed frame {frame_nr}. Inference time: \"\n f\"{inference_duration:.2f} seconds \"\n f\"({1/inference_duration:.2f} FPS)\"\n )\n )\n\n\nexcept KeyboardInterrupt:\n print(\"Processing interrupted.\")\nfinally:\n superres_video.release()\n bicubic_video.release()\n comparison_video.release()\n end_time = time.perf_counter()\n duration = end_time - start_time\n print(f\"Video's saved to {comparison_video_path.parent} directory.\")\n print(\n f\"Processed {frame_nr} frames in {duration:.2f} seconds. Total FPS \"\n f\"(including video processing): {frame_nr/duration:.2f}. \"\n f\"Inference FPS: {frame_nr/total_inference_duration:.2f}.\"\n )", "_____no_output_____" ] ], [ [ "### Show Side-by-Side Video of Bicubic and Superresolution Version", "_____no_output_____" ] ], [ [ "if not comparison_video_path.exists():\n raise ValueError(\"The comparison video does not exist.\")\nelse:\n video_link = FileLink(comparison_video_path)\n video_link.html_link_str = \"<a href='%s' download>%s</a>\"\n display(\n HTML(\n f\"Showing side by side comparison. If you cannot see the video in \"\n \"your browser, please click on the following link to download \"\n f\"the video<br>{video_link._repr_html_()}\"\n )\n )\n display(Video(comparison_video_path, width=800, embed=True))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
c5302a80e136f3951526536e4529f32dab5a7dde
68,682
ipynb
Jupyter Notebook
notebookToHtml/oldFiles/biosignalsnotebooks_html_publish/Categories/MainFiles/by_diff.ipynb
biosignalsnotebooks/biosignalsnotebooks
72b1f053320747683bb9ff123ca180cb1bd47f6a
[ "MIT" ]
7
2018-11-07T14:40:13.000Z
2019-11-03T20:38:52.000Z
notebookToHtml/oldFiles/biosignalsnotebooks_html_publish/Categories/MainFiles/by_diff.ipynb
csavur/biosignalsnotebooks
c99596741a854c58bdefb429906023ac48ddc3b7
[ "MIT" ]
null
null
null
notebookToHtml/oldFiles/biosignalsnotebooks_html_publish/Categories/MainFiles/by_diff.ipynb
csavur/biosignalsnotebooks
c99596741a854c58bdefb429906023ac48ddc3b7
[ "MIT" ]
1
2019-06-02T07:50:41.000Z
2019-06-02T07:50:41.000Z
44.656697
5,049
0.512157
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
c5304124fe1321dac4efd68721726b233c702445
25,455
ipynb
Jupyter Notebook
Section 1/Reinforcement Learning with TensorFlow & TRFL -- Q Learning.ipynb
PacktPublishing/Hands-On-Reinforcement-Learning-with-TensorFlow-TRFL
e1bbb654fbd95c12631a350af1f2d0858cf5e522
[ "MIT" ]
12
2019-04-19T02:00:33.000Z
2021-10-04T06:37:07.000Z
Section 1/Reinforcement Learning with TensorFlow & TRFL -- Q Learning.ipynb
PacktPublishing/Hands-On-Reinforcement-Learning-with-TensorFlow-TRFL
e1bbb654fbd95c12631a350af1f2d0858cf5e522
[ "MIT" ]
null
null
null
Section 1/Reinforcement Learning with TensorFlow & TRFL -- Q Learning.ipynb
PacktPublishing/Hands-On-Reinforcement-Learning-with-TensorFlow-TRFL
e1bbb654fbd95c12631a350af1f2d0858cf5e522
[ "MIT" ]
13
2019-04-19T02:06:46.000Z
2020-08-26T20:50:41.000Z
25,455
25,455
0.639717
[ [ [ "**Reinforcement Learning with TensorFlow & TRFL: Q Learning**\n* This notebook shows how to apply the classic Reinforcement Learning (RL) idea of Q learning with TRFL.\n* In TD learning we estimated state values: V(s). In Q learning we estimate action values: Q(s,a). Here we'll go over Q learning in the simple tabular case. Next section we will use this same Q learning function in powerful Deep Learning algorithms like Deep Q Network.\n* A key concept in RL is exploration. We'll introduce and use epsilon greedy exploration, which is often used with Q learning.\n\nOutline:\n1. Install TRFL\n2. Define the GridWorld environment\n3. Discuss Epsilon-Greedy Exploration\n4. Find the value of each state-action value in the environment using Q learning\n\n\n\n", "_____no_output_____" ] ], [ [ "#TRFL has issues on Colab with TensorFlow version tensorflow-1.13.0rc1\n#install TensorFlow 1.12 and restart run time\n!pip install tensorflow==1.12\n\nimport os\nos.kill(os.getpid(), 9)", "Collecting tensorflow==1.12\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/22/cc/ca70b78087015d21c5f3f93694107f34ebccb3be9624385a911d4b52ecef/tensorflow-1.12.0-cp36-cp36m-manylinux1_x86_64.whl (83.1MB)\n\u001b[K 100% |████████████████████████████████| 83.1MB 228kB/s \n\u001b[?25hRequirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.12) (1.0.9)\nRequirement already satisfied: numpy>=1.13.3 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.12) (1.16.2)\nRequirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.12) (1.1.0)\nRequirement already satisfied: astor>=0.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.12) (0.7.1)\nRequirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.12) (1.11.0)\nRequirement already satisfied: absl-py>=0.1.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.12) (0.7.1)\nRequirement already satisfied: gast>=0.2.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.12) (0.2.2)\nRequirement already satisfied: protobuf>=3.6.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.12) (3.7.1)\nCollecting tensorboard<1.13.0,>=1.12.0 (from tensorflow==1.12)\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/07/53/8d32ce9471c18f8d99028b7cef2e5b39ea8765bd7ef250ca05b490880971/tensorboard-1.12.2-py3-none-any.whl (3.0MB)\n\u001b[K 100% |████████████████████████████████| 3.1MB 8.4MB/s \n\u001b[?25hRequirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.12) (1.15.0)\nRequirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.12) (0.33.1)\nRequirement already satisfied: keras-applications>=1.0.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.12) (1.0.7)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from protobuf>=3.6.1->tensorflow==1.12) (40.9.0)\nRequirement already satisfied: werkzeug>=0.11.10 in /usr/local/lib/python3.6/dist-packages (from tensorboard<1.13.0,>=1.12.0->tensorflow==1.12) (0.15.2)\nRequirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tensorboard<1.13.0,>=1.12.0->tensorflow==1.12) (3.1)\nRequirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from keras-applications>=1.0.6->tensorflow==1.12) (2.8.0)\nInstalling collected 
packages: tensorboard, tensorflow\n Found existing installation: tensorboard 1.13.1\n Uninstalling tensorboard-1.13.1:\n Successfully uninstalled tensorboard-1.13.1\n Found existing installation: tensorflow 1.13.1\n Uninstalling tensorflow-1.13.1:\n Successfully uninstalled tensorflow-1.13.1\nSuccessfully installed tensorboard-1.12.2 tensorflow-1.12.0\n" ], [ "#install TRFL\n!pip install trfl==1.0\n\n#install Tensorflow Probability\n!pip install tensorflow-probability==0.5.0", "Requirement already satisfied: trfl==1.0 in /usr/local/lib/python3.6/dist-packages (1.0)\nRequirement already satisfied: dm-sonnet in /usr/local/lib/python3.6/dist-packages (from trfl==1.0) (1.23)\nRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from trfl==1.0) (1.16.2)\nRequirement already satisfied: absl-py in /usr/local/lib/python3.6/dist-packages (from trfl==1.0) (0.7.1)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from trfl==1.0) (1.11.0)\nCollecting tensorflow-probability==0.5.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/a1/ca/6f213618b5f7d0bf6139e6ec928d412a5ca14e4776adfd41a59c74a34021/tensorflow_probability-0.5.0-py2.py3-none-any.whl (680kB)\n\u001b[K 100% |████████████████████████████████| 686kB 20.6MB/s \n\u001b[?25hRequirement already satisfied: numpy>=1.13.3 in /usr/local/lib/python3.6/dist-packages (from tensorflow-probability==0.5.0) (1.16.2)\nRequirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-probability==0.5.0) (1.11.0)\nInstalling collected packages: tensorflow-probability\n Found existing installation: tensorflow-probability 0.6.0\n Uninstalling tensorflow-probability-0.6.0:\n Successfully uninstalled tensorflow-probability-0.6.0\nSuccessfully installed tensorflow-probability-0.5.0\n" ] ], [ [ "**GridWorld**\n\nThe GridWorld environment is a four by four grid. The agent randomly starts on the grid and can move either up, left, right, or down. If the agent reaches the upper left or lower right the episode is over. 
Every action the agent takes gets a reward of -1 until you reach the upper left or over right.", "_____no_output_____" ] ], [ [ "#Environment from: https://github.com/dennybritz/reinforcement-learning/blob/cee9e78652f8ce98d6079282daf20680e5e17c6a/lib/envs/gridworld.py\n\n#define the environment\n\nimport io\nimport numpy as np\nimport sys\nfrom gym.envs.toy_text import discrete\nimport pprint\n\nUP = 0\nRIGHT = 1\nDOWN = 2\nLEFT = 3\n\nclass GridworldEnv(discrete.DiscreteEnv):\n \"\"\"\n Grid World environment from Sutton's Reinforcement Learning book chapter 4.\n You are an agent on an MxN grid and your goal is to reach the terminal\n state at the top left or the bottom right corner.\n For example, a 4x4 grid looks as follows:\n T o o o\n o x o o\n o o o o\n o o o T\n x is your position and T are the two terminal states.\n You can take actions in each direction (UP=0, RIGHT=1, DOWN=2, LEFT=3).\n Actions going off the edge leave you in your current state.\n You receive a reward of -1 at each step until you reach a terminal state.\n \"\"\"\n\n metadata = {'render.modes': ['human', 'ansi']}\n\n def __init__(self, shape=[4,4]):\n if not isinstance(shape, (list, tuple)) or not len(shape) == 2:\n raise ValueError('shape argument must be a list/tuple of length 2')\n\n self.shape = shape\n\n nS = np.prod(shape)\n nA = 4\n\n MAX_Y = shape[0]\n MAX_X = shape[1]\n\n P = {}\n grid = np.arange(nS).reshape(shape)\n it = np.nditer(grid, flags=['multi_index'])\n\n while not it.finished:\n s = it.iterindex\n y, x = it.multi_index\n\n # P[s][a] = (prob, next_state, reward, is_done)\n P[s] = {a : [] for a in range(nA)}\n\n is_done = lambda s: s == 0 or s == (nS - 1)\n reward = 0.0 if is_done(s) else -1.0\n #reward = 1.0 if is_done(s) else 0.0\n\n # We're stuck in a terminal state\n if is_done(s):\n P[s][UP] = [(1.0, s, reward, True)]\n P[s][RIGHT] = [(1.0, s, reward, True)]\n P[s][DOWN] = [(1.0, s, reward, True)]\n P[s][LEFT] = [(1.0, s, reward, True)]\n # Not a terminal state\n else:\n ns_up = s if y == 0 else s - MAX_X\n ns_right = s if x == (MAX_X - 1) else s + 1\n ns_down = s if y == (MAX_Y - 1) else s + MAX_X\n ns_left = s if x == 0 else s - 1\n P[s][UP] = [(1.0, ns_up, reward, is_done(ns_up))]\n P[s][RIGHT] = [(1.0, ns_right, reward, is_done(ns_right))]\n P[s][DOWN] = [(1.0, ns_down, reward, is_done(ns_down))]\n P[s][LEFT] = [(1.0, ns_left, reward, is_done(ns_left))]\n\n it.iternext()\n\n # Initial state distribution is uniform\n isd = np.ones(nS) / nS\n\n # We expose the model of the environment for educational purposes\n # This should not be used in any model-free learning algorithm\n self.P = P\n\n super(GridworldEnv, self).__init__(nS, nA, P, isd)\n\n def _render(self, mode='human', close=False):\n \"\"\" Renders the current gridworld layout\n For example, a 4x4 grid with the mode=\"human\" looks like:\n T o o o\n o x o o\n o o o o\n o o o T\n where x is your position and T are the two terminal states.\n \"\"\"\n if close:\n return\n\n outfile = io.StringIO() if mode == 'ansi' else sys.stdout\n\n grid = np.arange(self.nS).reshape(self.shape)\n it = np.nditer(grid, flags=['multi_index'])\n while not it.finished:\n s = it.iterindex\n y, x = it.multi_index\n\n if self.s == s:\n output = \" x \"\n elif s == 0 or s == self.nS - 1:\n output = \" T \"\n else:\n output = \" o \"\n\n if x == 0:\n output = output.lstrip()\n if x == self.shape[1] - 1:\n output = output.rstrip()\n\n outfile.write(output)\n\n if x == self.shape[1] - 1:\n outfile.write(\"\\n\")\n\n it.iternext()\n \npp = 
pprint.PrettyPrinter(indent=2)", "_____no_output_____" ] ], [ [ "**An Introduction to Exploration: Epsilon-Greedy Exploration**\n\nExploration is a key concept in RL. In order to find the best policies, an agent needs to explore the environment. By exploring, the agent can experience new states and rewards. In the last notebook, the agent explored GridWorld by taking a random action at every step. While random action explorations can work in some environments, the downside is the agent can spend too much time exploring bad states or states that have already been explored fully and not enough time exploring promising states. A simple--yet surprisingly effective--approach to exploration is Epsilon-Greedy exploration. A epsilon percentage of the time, the agent chooses a random action. The remaining amount of the time (1-epsilon) the agent choose the best estimated action aka the* greedy action*. Epsilon can be a fixed value between 0 and 1 or can start at a high value and gradually decay over time (ie start at .99 and decay to 0.01). In this notebook we will used a fixed epsilon value of 0.1. Below is a simple example of epsilon-greedy exploration.\n", "_____no_output_____" ] ], [ [ "#declare the environment\nenv = GridworldEnv()\n#reset the environment and get the agent's current position (observation)\ncurrent_state = env.reset()\nenv._render()\nprint(\"\")\naction_dict = {0:\"UP\",1:\"RIGHT\", 2:\"DOWN\",3:\"LEFT\"}\ngreedy_dict = {0:3,1:3,2:3,3:3,\n 4:0,5:0,6:0,7:0,\n 8:2,9:2,10:2,11:2,\n 12:1,13:1,14:1,15:1}\nepsilon = 0.1\n\nfor i in range(10):\n #choose random action epsilon amount of the time\n if np.random.rand() < epsilon:\n action = env.action_space.sample()\n action_type = \"random\"\n else:\n #Choose a greedy action. We will learn greedy actions with Q learning in the following cells.\n action = greedy_dict[current_state]\n action_type = \"greedy\"\n \n current_state,reward,done,info = env.step(action)\n print(\"Agent took {} action {} and is now in state {} \".format(action_type, action_dict[action], current_state))\n env._render()\n print(\"\")\n if done:\n print(\"Agent reached end of episode, resetting the env\")\n print(env.reset())\n print(\"\")\n env._render()\n print(\"\")", "T o o o\no o o x\no o o o\no o o T\n\nAgent took random action LEFT and is now in state 6 \nT o o o\no o x o\no o o o\no o o T\n\nAgent took greedy action UP and is now in state 2 \nT o x o\no o o o\no o o o\no o o T\n\nAgent took greedy action LEFT and is now in state 1 \nT x o o\no o o o\no o o o\no o o T\n\nAgent took greedy action LEFT and is now in state 0 \nx o o o\no o o o\no o o o\no o o T\n\nAgent reached end of episode, resetting the env\n11\n\nT o o o\no o o o\no o o x\no o o T\n\nAgent took random action RIGHT and is now in state 11 \nT o o o\no o o o\no o o x\no o o T\n\nAgent took greedy action DOWN and is now in state 15 \nT o o o\no o o o\no o o o\no o o x\n\nAgent reached end of episode, resetting the env\n4\n\nT o o o\nx o o o\no o o o\no o o T\n\nAgent took greedy action RIGHT and is now in state 5 \nT o o o\no x o o\no o o o\no o o T\n\nAgent took greedy action UP and is now in state 1 \nT x o o\no o o o\no o o o\no o o T\n\nAgent took greedy action LEFT and is now in state 0 \nx o o o\no o o o\no o o o\no o o T\n\nAgent reached end of episode, resetting the env\n10\n\nT o o o\no o o o\no o x o\no o o T\n\nAgent took greedy action LEFT and is now in state 9 \nT o o o\no o o o\no x o o\no o o T\n\n" ] ], [ [ "** TRFL Usage **\n\nOnce again, the three main TRFL steps are:\n1. 
In the TensorFlow graph, define the necessary TensorFlow tensors\n2. In the graph, feed the tensors into the trfl method\n3. In the TensorFlow session, run the graph operation\n\nWe saw this in the last notebook. Here in Q learning there are some slight differences. We use the trfl.qlearning() method and we input the action and action values (instead of state values) into the method. Note for the action values q_t and q_next_t the shape is batch size X number of actions.", "_____no_output_____" ] ], [ [ "#set up TRFL graph\nimport tensorflow as tf\nimport trfl\n\n#https://github.com/deepmind/trfl/blob/master/docs/trfl.md#qlearningq_tm1-a_tm1-r_t-pcont_t-q_t-nameqlearning\n# Args:\n# q_tm1: Tensor holding Q-values for first timestep in a batch of transitions, shape [B x num_actions].\n# a_tm1: Tensor holding action indices, shape [B].\n# r_t: Tensor holding rewards, shape [B].\n# pcont_t: Tensor holding pcontinue values, shape [B].\n# q_t: Tensor holding Q-values for second timestep in a batch of transitions, shape [B x num_actions].\n# name: name to prefix ops created within this op.\n\n\nnum_actions = env.action_space.n\nbatch_size = 1\n\nq_t = tf.placeholder(dtype=tf.float32,shape=[batch_size,num_actions],name=\"q_value\")\naction_t = tf.placeholder(dtype=tf.int32,shape=[batch_size],name=\"action\")\nreward_t = tf.placeholder(dtype=tf.float32,shape=[batch_size],name='reward')\ngamma_t = tf.placeholder(dtype=tf.float32,shape=[batch_size],name='discount_factor')\nq_next_t= tf.placeholder(dtype=tf.float32,shape=[batch_size,num_actions],name='q_next_value')\n \nqloss_t, q_extra_t = trfl.qlearning(q_t,action_t,reward_t,gamma_t,q_next_t)", "_____no_output_____" ] ], [ [ "** The RL Training Loop **\n\nIn the next cell we are going to define the training loop and then run it in the following cell. The goal is to estimate the action value of each state (the value of each state-action combination) using Q learning. action_value_array holds the estimated values. After each step the agent takes in the env, we update the action_value_array with the Q learning formula.\n\n** TRFL Usage **\n\nThe TRFL usage here is to run the trfl operation q_learning_t in sess.run(). We then take the output (q_learning_output) and extract the td_error part of that tensor. Using the td_error we update the action_value_array. For reference, the code below shows the full output of trfl.qlearning and the classic RL method of performing tabular Q learning updates.", "_____no_output_____" ] ], [ [ "def q_learning_action_value_estimate(env,episodes=1000,alpha=0.05,discount_factor=1.0,epsilon=0.1):\n \"\"\"\n Args:\n env: OpenAI env. env.P represents the transition probabilities of the environment.\n env.P[s][a] is a list of transition tuples (prob, next_state, reward, done).\n env.nS is a number of states in the environment. \n env.nA is a number of actions in the environment.\n episodes: number of episodes to run\n alpha: learning rate for state value updates\n discount_factor: Gamma discount factor. 
pcont_t TRFL argument\n \n Returns:\n Value of each state with random policy\n \"\"\"\n \n with tf.Session() as sess:\n #initialize the estimated state values to zero\n action_value_array = np.zeros((env.nS,env.nA))\n #reset the env\n current_state = env.reset()\n #env._render()\n\n #run through each episode taking a random action each time\n #upgrade estimated state value after each action\n current_episode = 0\n while current_episode < episodes:\n #choose action based on epsilon-greedy policy\n if np.random.rand() < epsilon:\n eg_action = env.action_space.sample()\n else:\n #Choose a greedy action. We will learn greedy actions with Q learning in the following cells.\n eg_action = np.argmax(action_value_array[current_state])\n \n #take a step using epsilon-greedy action\n next_state, rew, done, info = env.step(eg_action)\n \n #run TRFL operation in the session\n q_learning_output = sess.run([q_extra_t],feed_dict={q_t:np.expand_dims(action_value_array[current_state],axis=0),\n action_t:np.expand_dims(eg_action,axis=0),\n reward_t:np.expand_dims(rew,axis=0),\n gamma_t:np.expand_dims(discount_factor,axis=0),\n q_next_t:np.expand_dims(action_value_array[next_state],axis=0)})\n\n# trfl.qlearning() returns:\n# A namedtuple with fields:\n# loss: a tensor containing the batch of losses, shape [B]. \n# extra: a namedtuple with fields:\n# target: batch of target values for q_tm1[a_tm1], shape [B].\n# td_error: batch of temporal difference errors, shape [B].\n# Here we are using the td_error to update our action values. We will use the loss with a gradient descent optimizer in Deep Q Network session.\n\n #Use the Q learning TD error to update estimated state-action values\n action_value_array[current_state,eg_action] = action_value_array[current_state,eg_action] + alpha * q_learning_output[0].td_error\n \n #For reference, here is the tabular Q learning update method\n# max_q_value = np.max(action_value_array[next_state])\n# action_value_array[current_state,eg_action] = action_value_array[current_state,eg_action] + \\\n# alpha * (rew + discount_factor*max_q_value - action_value_array[current_state,eg_action])\n\n #if the epsiode is done, reset the env, if not the next state becomes the current state and the loop repeats\n if done:\n current_state = env.reset()\n current_episode += 1\n else:\n current_state = next_state\n\n\n return action_value_array\n \n\n ", "_____no_output_____" ], [ "#run episodes with Q learning and get the state value estimates\naction_values = q_learning_action_value_estimate(env,episodes=2000,alpha=0.1)\n\nprint(\"All Action Value Estimates:\")\nprint(np.round(action_values.reshape((16,4)),1))\nprint(\"each row is a state, each column is an action\")\nprint(\"\")\n\noptimal_action_estimates = np.max(action_values,axis=1)\nprint(\"Optimal Action Value Estimates:\")\nprint(np.round(optimal_action_estimates.reshape(env.shape),1))\nprint(\"estimate of the optimal State value at each state\")\nprint(\"\")", "All Action Value Estimates:\n[[ 0. 0. 0. 0. ]\n [-1.6 -2.4 -2.5 -1. ]\n [-2.2 -2.7 -2.8 -2. ]\n [-3.2 -3.2 -3. -3. ]\n [-1. -2.5 -2.4 -1.8]\n [-2. -2.7 -2.6 -2. ]\n [-3. -3. -3. -3. ]\n [-3. -2.5 -2. -3.2]\n [-2. -2.4 -2.3 -2.5]\n [-3. -3. -2.9 -3. ]\n [-2.8 -2. -2. -2.7]\n [-2.6 -1.7 -1. -2.2]\n [-3. -3. -3.3 -3.3]\n [-2.6 -2. -2.6 -2.9]\n [-1.8 -1. -1.7 -2.4]\n [ 0. 0. 0. 0. ]]\neach row is a state, each column is an action\n\nOptimal Action Value Estimates:\n[[ 0. -1. -2. -3. ]\n [-1. -2. -3. -2. ]\n [-2. -2.9 -2. -1. ]\n [-3. -2. -1. 0. 
]]\nestimate of the optimal State value at each state\n\n" ] ], [ [ "The first output shows the estimated value for each action in each state. Ie row 4 column 4 is the value if the agent was in the upper right grid cell and took that action left. In the second output, we take the best action for each of the 16 states and show the agent's estimate of the state value assuming the agent always acts greedily.", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
c53050245fec1bb6796d2b916cd4f1b83c3ce449
90,320
ipynb
Jupyter Notebook
ML/DAT8-master/notebooks/13_advanced_model_evaluation.ipynb
praveenpmin/Python
513fcde7430b03a187e2c7e58302b88645388eed
[ "MIT" ]
null
null
null
ML/DAT8-master/notebooks/13_advanced_model_evaluation.ipynb
praveenpmin/Python
513fcde7430b03a187e2c7e58302b88645388eed
[ "MIT" ]
null
null
null
ML/DAT8-master/notebooks/13_advanced_model_evaluation.ipynb
praveenpmin/Python
513fcde7430b03a187e2c7e58302b88645388eed
[ "MIT" ]
null
null
null
81.516245
18,600
0.79029
[ [ [ "# Data Preparation and Advanced Model Evaluation", "_____no_output_____" ], [ "## Agenda\n\n**Data preparation**\n\n- Handling missing values\n- Handling categorical features (review)\n\n**Advanced model evaluation**\n\n- ROC curves and AUC\n- Bonus: ROC curve is only sensitive to rank order of predicted probabilities\n- Cross-validation", "_____no_output_____" ], [ "## Part 1: Handling missing values", "_____no_output_____" ], [ "scikit-learn models expect that all values are **numeric** and **hold meaning**. Thus, missing values are not allowed by scikit-learn.", "_____no_output_____" ] ], [ [ "# read the Titanic data\nimport pandas as pd\nurl = 'https://raw.githubusercontent.com/justmarkham/DAT8/master/data/titanic.csv'\ntitanic = pd.read_csv(url, index_col='PassengerId')\ntitanic.shape", "_____no_output_____" ], [ "# check for missing values\ntitanic.isnull().sum()", "_____no_output_____" ] ], [ [ "One possible strategy is to **drop missing values**:", "_____no_output_____" ] ], [ [ "# drop rows with any missing values\ntitanic.dropna().shape", "_____no_output_____" ], [ "# drop rows where Age is missing\ntitanic[titanic.Age.notnull()].shape", "_____no_output_____" ] ], [ [ "Sometimes a better strategy is to **impute missing values**:", "_____no_output_____" ] ], [ [ "# mean Age\ntitanic.Age.mean()", "_____no_output_____" ], [ "# median Age\ntitanic.Age.median()", "_____no_output_____" ], [ "# most frequent Age\ntitanic.Age.mode()", "_____no_output_____" ], [ "# fill missing values for Age with the median age\ntitanic.Age.fillna(titanic.Age.median(), inplace=True)", "_____no_output_____" ] ], [ [ "Another strategy would be to build a **KNN model** just to impute missing values. How would we do that?\n\nIf values are missing from a categorical feature, we could treat the missing values as **another category**. 
Why might that make sense?\n\nHow do we **choose** between all of these strategies?", "_____no_output_____" ], [ "## Part 2: Handling categorical features (Review)", "_____no_output_____" ], [ "How do we include a categorical feature in our model?\n\n- **Ordered categories:** transform them to sensible numeric values (example: small=1, medium=2, large=3)\n- **Unordered categories:** use dummy encoding (0/1)", "_____no_output_____" ] ], [ [ "titanic.head(10)", "_____no_output_____" ], [ "# encode Sex_Female feature\ntitanic['Sex_Female'] = titanic.Sex.map({'male':0, 'female':1})", "_____no_output_____" ], [ "# create a DataFrame of dummy variables for Embarked\nembarked_dummies = pd.get_dummies(titanic.Embarked, prefix='Embarked')\nembarked_dummies.drop(embarked_dummies.columns[0], axis=1, inplace=True)\n\n# concatenate the original DataFrame and the dummy DataFrame\ntitanic = pd.concat([titanic, embarked_dummies], axis=1)", "_____no_output_____" ], [ "titanic.head(1)", "_____no_output_____" ] ], [ [ "- How do we **interpret** the encoding for Embarked?\n- Why didn't we just encode Embarked using a **single feature** (C=0, Q=1, S=2)?\n- Does it matter which category we choose to define as the **baseline**?\n- Why do we only need **two dummy variables** for Embarked?", "_____no_output_____" ] ], [ [ "# define X and y\nfeature_cols = ['Pclass', 'Parch', 'Age', 'Sex_Female', 'Embarked_Q', 'Embarked_S']\nX = titanic[feature_cols]\ny = titanic.Survived\n\n# train/test split\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)\n\n# train a logistic regression model\nfrom sklearn.linear_model import LogisticRegression\nlogreg = LogisticRegression(C=1e9)\nlogreg.fit(X_train, y_train)\n\n# make predictions for testing set\ny_pred_class = logreg.predict(X_test)\n\n# calculate testing accuracy\nfrom sklearn import metrics\nprint metrics.accuracy_score(y_test, y_pred_class)", "0.793721973094\n" ] ], [ [ "## Part 3: ROC curves and AUC", "_____no_output_____" ] ], [ [ "# predict probability of survival\ny_pred_prob = logreg.predict_proba(X_test)[:, 1]", "_____no_output_____" ], [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = (8, 6)\nplt.rcParams['font.size'] = 14", "_____no_output_____" ], [ "# plot ROC curve\nfpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred_prob)\nplt.plot(fpr, tpr)\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.0])\nplt.xlabel('False Positive Rate (1 - Specificity)')\nplt.ylabel('True Positive Rate (Sensitivity)')", "_____no_output_____" ], [ "# calculate AUC\nprint metrics.roc_auc_score(y_test, y_pred_prob)", "0.838692434211\n" ] ], [ [ "Besides allowing you to calculate AUC, seeing the ROC curve can help you to choose a threshold that **balances sensitivity and specificity** in a way that makes sense for the particular context.", "_____no_output_____" ] ], [ [ "# histogram of predicted probabilities grouped by actual response value\ndf = pd.DataFrame({'probability':y_pred_prob, 'actual':y_test})\ndf.hist(column='probability', by='actual', sharex=True, sharey=True)", "_____no_output_____" ] ], [ [ "What would have happened if you had used **y_pred_class** instead of **y_pred_prob** when drawing the ROC curve or calculating AUC?", "_____no_output_____" ] ], [ [ "# ROC curve using y_pred_class - WRONG!\nfpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred_class)\nplt.plot(fpr, tpr)", "_____no_output_____" ], [ "# AUC using y_pred_class - WRONG!\nprint 
metrics.roc_auc_score(y_test, y_pred_class)", "0.780962171053\n" ] ], [ [ "If you use **y_pred_class**, it will interpret the zeros and ones as predicted probabilities of 0% and 100%.", "_____no_output_____" ], [ "## Bonus: ROC curve is only sensitive to rank order of predicted probabilities", "_____no_output_____" ] ], [ [ "# print the first 10 predicted probabilities\ny_pred_prob[:10]", "_____no_output_____" ], [ "# take the square root of predicted probabilities (to make them all bigger)\nimport numpy as np\ny_pred_prob_new = np.sqrt(y_pred_prob)\n\n# print the modified predicted probabilities\ny_pred_prob_new[:10]", "_____no_output_____" ], [ "# histogram of predicted probabilities has changed\ndf = pd.DataFrame({'probability':y_pred_prob_new, 'actual':y_test})\ndf.hist(column='probability', by='actual', sharex=True, sharey=True)", "_____no_output_____" ], [ "# ROC curve did not change\nfpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred_prob_new)\nplt.plot(fpr, tpr)", "_____no_output_____" ], [ "# AUC did not change\nprint metrics.roc_auc_score(y_test, y_pred_prob_new)", "0.838692434211\n" ] ], [ [ "## Part 4: Cross-validation", "_____no_output_____" ] ], [ [ "# calculate cross-validated AUC\nfrom sklearn.cross_validation import cross_val_score\ncross_val_score(logreg, X, y, cv=10, scoring='roc_auc').mean()", "_____no_output_____" ], [ "# add Fare to the model\nfeature_cols = ['Pclass', 'Parch', 'Age', 'Sex_Female', 'Embarked_Q', 'Embarked_S', 'Fare']\nX = titanic[feature_cols]\n\n# recalculate AUC\ncross_val_score(logreg, X, y, cv=10, scoring='roc_auc').mean()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
c5305af17d6501851497301fc481ac57e423644e
99,166
ipynb
Jupyter Notebook
rnns/intro-to-rnns/.ipynb_checkpoints/Tensorboard example-checkpoint.ipynb
anthony-sarkis/deep-learning-foundations
4c043f2b7438aaeaad5e588566f73740a9429cb8
[ "MIT" ]
1
2022-03-22T23:06:48.000Z
2022-03-22T23:06:48.000Z
rnns/intro-to-rnns/.ipynb_checkpoints/Tensorboard example-checkpoint.ipynb
anthony-sarkis/deep-learning-foundations
4c043f2b7438aaeaad5e588566f73740a9429cb8
[ "MIT" ]
null
null
null
rnns/intro-to-rnns/.ipynb_checkpoints/Tensorboard example-checkpoint.ipynb
anthony-sarkis/deep-learning-foundations
4c043f2b7438aaeaad5e588566f73740a9429cb8
[ "MIT" ]
null
null
null
75.931087
7,086
0.593308
[ [ [ "# Tensorboard example\n", "_____no_output_____" ] ], [ [ "import time\nfrom collections import namedtuple\nimport numpy as np\nimport tensorflow as tf", "_____no_output_____" ], [ "with open('anna.txt', 'r') as f:\n text=f.read()\nvocab = set(text)\nvocab_to_int = {c: i for i, c in enumerate(vocab)}\nint_to_vocab = dict(enumerate(vocab))\nencoded = np.array([vocab_to_int[c] for c in text], dtype=np.int32)", "_____no_output_____" ], [ "text[:100]", "_____no_output_____" ], [ "encoded[:100]", "_____no_output_____" ] ], [ [ "Since the network is working with individual characters, it's similar to a classification problem in which we are trying to predict the next character from the previous text. Here's how many 'classes' our network has to pick from.", "_____no_output_____" ] ], [ [ "len(vocab)", "_____no_output_____" ], [ "def get_batches(arr, n_seqs, n_steps_per_seq):\n '''Create a generator that returns batches of size\n n_seqs x n_steps from arr.\n \n Arguments\n ---------\n arr: Array you want to make batches from\n n_seqs: Batch size, the number of sequences per batch\n n_steps: Number of sequence steps per batch\n '''\n # Get the batch size and number of batches we can make\n # ie n_seq = 10, n_steps_per_sew = 2, batch_size = 20\n \n batch_size = n_seqs * n_steps_per_seq\n \n # ie arr= 40, over 20, so 2 batches\n n_batches = len(arr) // batch_size\n \n # Keep only enough characters to make full batches\n # n_batches = 2 * batch_size = 20 = 40??\n # why not simply use len(arr)?\n \n arr = arr[ : n_batches * batch_size]\n \n # Reshape into n_seqs rows\n arr = arr.reshape((n_seqs, -1))\n \n for n in range(0, arr.shape[1], n_steps_per_seq):\n # The features\n x = arr[ :, n: n + n_steps_per_seq]\n # The targets, shifted by one\n y = np.zeros_like(x)\n y[ :, : -1], y[ : , -1] = x[ :, 1: ], x[ :, 0]\n yield x, y", "_____no_output_____" ], [ "batches = get_batches(encoded, 10, 50)\nx, y = next(batches)", "_____no_output_____" ], [ "def build_inputs(batch_size, num_steps):\n ''' Define placeholders for inputs, targets, and dropout \n \n Arguments\n ---------\n batch_size: Batch size, number of sequences per batch\n num_steps: Number of sequence steps in a batch\n \n '''\n with tf.name_scope('inputs'):\n # Declare placeholders we'll feed into the graph\n inputs = tf.placeholder(tf.int32, (batch_size, num_steps), name=\"inputs\")\n targets = tf.placeholder(tf.int32, (batch_size, num_steps), name=\"targets\")\n\n # Keep probability placeholder for drop out layers\n keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n \n return inputs, targets, keep_prob", "_____no_output_____" ], [ "def single_lstm_cell(lstm_size, keep_prob):\n \n with tf.name_scope(\"RNN_layers\"):\n lstm = tf.contrib.rnn.NASCell(lstm_size, reuse = tf.get_variable_scope().reuse)\n \n # Add dropout to the cell outputs\n drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob = keep_prob)\n \n return drop", "_____no_output_____" ], [ "def build_lstm(lstm_size, num_layers, batch_size, keep_prob):\n ''' Build LSTM cell.\n \n Arguments\n ---------\n keep_prob: Scalar tensor (tf.placeholder) for the dropout keep probability\n lstm_size: Size of the hidden layers in the LSTM cells\n num_layers: Number of LSTM layers\n batch_size: Batch size\n\n '''\n ### Build the LSTM Cell\n \n # Stack up multiple LSTM layers, for deep learning\n \n with tf.name_scope(\"RNN_layers\"):\n rnn_cells = tf.contrib.rnn.MultiRNNCell([single_lstm_cell(lstm_size, keep_prob) for _ in range(num_layers)], \n state_is_tuple = True)\n \n with 
tf.name_scope(\"RNN_init_state\"):\n initial_state = rnn_cells.zero_state(batch_size, tf.float32)\n \n return rnn_cells, initial_state", "_____no_output_____" ], [ "def build_output(lstm_output, in_size, out_size):\n ''' Build a softmax layer, return the softmax output and logits.\n \n Arguments\n ---------\n lstm_output: List of output tensors from the LSTM layer\n in_size: Size of the input tensor, for example, size of the LSTM cells\n out_size: Size of this softmax layer\n \n '''\n\n # Reshape output so it's a bunch of rows, one row for each step for each sequence.\n \n # Concatenate lstm_output over axis 1 (the columns)\n # ie t1 = t1 = [[1, 2, 3], [4, 5, 6]]\n # t2 = [[7, 8, 9], [10, 11, 12]]\n # tf.concat([t1, t2], 1) ==> [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]\n seq_output = tf.concat(lstm_output, axis=1)\n \n # Reshape seq_output to a 2D tensor with lstm_size columns\n x = tf.reshape(lstm_output, [-1, in_size])\n \n # Connect the RNN outputs to a softmax layer\n with tf.variable_scope('softmax'):\n # Create the weight and bias variables here\n softmax_w = tf.Variable(tf.truncated_normal( (in_size, out_size), stddev=0.1))\n softmax_b = tf.Variable(tf.zeros( out_size ))\n \n # tensorboard\n tf.summary.histogram(\"softmax_w\", softmax_w)\n \n # Since output is a bunch of rows of RNN cell outputs, logits will be a bunch\n # of rows of logit outputs, one for each step and sequence\n logits = tf.matmul(x, softmax_w) + softmax_b\n \n # Use softmax to get the probabilities for predicted characters\n out = tf.nn.softmax(logits, name=\"predictions\")\n tf.summary.histogram(\"predictions\", out)\n \n return out, logits", "_____no_output_____" ], [ "def build_loss(logits, targets, lstm_size, num_classes):\n ''' Calculate the loss from the logits and the targets.\n \n Arguments\n ---------\n logits: Logits from final fully connected layer\n targets: Targets for supervised learning\n lstm_size: Number of LSTM hidden units\n num_classes: Number of classes in targets\n \n '''\n \n # One-hot encode targets and reshape to match logits, one row per sequence per step\n y_one_hot = tf.one_hot(targets, num_classes)\n y_reshaped = tf.reshape( y_one_hot, logits.get_shape() )\n \n # Softmax cross entropy loss\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped)\n loss = tf.reduce_mean(loss)\n \n # tensorboard\n tf.summary.scalar('loss', loss)\n \n return loss", "_____no_output_____" ], [ "def build_optimizer(loss, learning_rate, grad_clip):\n ''' Build optmizer for training, using gradient clipping.\n \n Arguments:\n loss: Network loss\n learning_rate: Learning rate for optimizer\n \n '''\n \n # Optimizer for training, using gradient clipping to control exploding gradients\n tvars = tf.trainable_variables()\n grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), grad_clip)\n train_op = tf.train.AdamOptimizer(learning_rate)\n optimizer = train_op.apply_gradients(zip(grads, tvars))\n \n return optimizer", "_____no_output_____" ], [ "class CharRNN:\n \n def __init__(self, num_classes, batch_size=64, num_steps=50, \n lstm_size=128, num_layers=2, learning_rate=0.001, \n grad_clip=5, sampling=False):\n \n # When we're using this network for sampling later, we'll be passing in\n # one character at a time, so providing an option for that\n if sampling == True:\n batch_size, num_steps = 1, 1\n else:\n batch_size, num_steps = batch_size, num_steps\n\n tf.reset_default_graph()\n \n # Build the input placeholder tensors\n self.inputs, self.targets, self.keep_prob = 
build_inputs(batch_size, num_steps)\n x_one_hot = tf.one_hot(self.inputs, num_classes, name=\"x_one_hot\")\n\n with tf.name_scope(\"RNN_layers\"):\n # Build the LSTM cell\n cells, self.initial_state = build_lstm(lstm_size, num_layers, batch_size, self.keep_prob)\n \n\n ### Run the data through the RNN layers\n with tf.name_scope(\"RNN_forward\"):\n # Run each sequence step through the RNN with tf.nn.dynamic_rnn \n outputs, state = tf.nn.dynamic_rnn(cells, x_one_hot, initial_state=self.initial_state)\n \n \n self.final_state = state\n \n # Get softmax predictions and logits\n self.prediction, self.logits = build_output(outputs, lstm_size, num_classes)\n \n # Loss and optimizer (with gradient clipping)\n self.loss = build_loss(self.logits, self.targets, lstm_size, num_classes)\n self.optimizer = build_optimizer(self.loss, learning_rate, grad_clip)", "_____no_output_____" ], [ "batch_size = 64 # Sequences per batch\nnum_steps = 128 # Number of sequence steps per batch\nlstm_size = 512 # Size of hidden layers in LSTMs\nnum_layers = 2 # Number of LSTM layers\nlearning_rate = 0.001 # Learning rate\nkeep_prob = 0.5 # Dropout keep probability", "_____no_output_____" ], [ "model = CharRNN(len(vocab), batch_size=batch_size, num_steps=num_steps,\n lstm_size=lstm_size, num_layers=num_layers, \n learning_rate=learning_rate)", "_____no_output_____" ], [ "epochs = 3\n# Save every N iterations\nsave_every_n = 200\n\nsaver = tf.train.Saver(max_to_keep=100)\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n \n # Tensoboard\n train_writer = tf.summary.FileWriter('./logs/1/train', sess.graph)\n test_writer = tf.summary.FileWriter('./logs/1/test')\n \n # Use the line below to load a checkpoint and resume training\n #saver.restore(sess, 'checkpoints/______.ckpt')\n counter = 0\n for e in range(epochs):\n # Train network\n new_state = sess.run(model.initial_state)\n loss = 0\n for x, y in get_batches(encoded, batch_size, num_steps):\n counter += 1\n start = time.time()\n feed = {model.inputs: x,\n model.targets: y,\n model.keep_prob: keep_prob,\n model.initial_state: new_state}\n \n merged = tf.summary.merge_all() # Tensorboard\n summary, batch_loss, new_state, _ = sess.run([merged, model.loss, \n model.final_state, \n model.optimizer], \n feed_dict=feed)\n \n train_writer.add_summary(summary, counter)\n end = time.time()\n print('Epoch: {}/{}... '.format(e+1, epochs),\n 'Training Step: {}... '.format(counter),\n 'Training loss: {:.4f}... '.format(batch_loss),\n '{:.4f} sec/batch'.format((end-start)))\n \n if (counter % save_every_n == 0):\n saver.save(sess, \"checkpoints/i{}_l{}.ckpt\".format(counter, lstm_size))\n \n saver.save(sess, \"checkpoints/i{}_l{}.ckpt\".format(counter, lstm_size))", "Epoch: 1/3... Training Step: 1... Training loss: 4.4191... 0.7205 sec/batch\nEpoch: 1/3... Training Step: 2... Training loss: 4.4164... 0.5005 sec/batch\nEpoch: 1/3... Training Step: 3... Training loss: 4.4136... 0.5001 sec/batch\nEpoch: 1/3... Training Step: 4... Training loss: 4.4104... 0.4955 sec/batch\nEpoch: 1/3... Training Step: 5... Training loss: 4.4066... 0.4832 sec/batch\nEpoch: 1/3... Training Step: 6... Training loss: 4.4017... 0.5049 sec/batch\nEpoch: 1/3... Training Step: 7... Training loss: 4.3954... 0.4806 sec/batch\nEpoch: 1/3... Training Step: 8... Training loss: 4.3867... 0.5163 sec/batch\nEpoch: 1/3... Training Step: 9... Training loss: 4.3744... 0.5002 sec/batch\nEpoch: 1/3... Training Step: 10... Training loss: 4.3562... 0.5011 sec/batch\nEpoch: 1/3... Training Step: 11... 
Training loss: 4.3280... 0.4996 sec/batch\nEpoch: 1/3... Training Step: 12... Training loss: 4.2707... 0.4847 sec/batch\nEpoch: 1/3... Training Step: 13... Training loss: 4.1100... 0.5005 sec/batch\nEpoch: 1/3... Training Step: 14... Training loss: 3.8801... 0.5065 sec/batch\nEpoch: 1/3... Training Step: 15... Training loss: 3.7083... 0.5000 sec/batch\nEpoch: 1/3... Training Step: 16... Training loss: 3.4019... 0.5166 sec/batch\nEpoch: 1/3... Training Step: 17... Training loss: 3.3170... 0.5203 sec/batch\nEpoch: 1/3... Training Step: 18... Training loss: 3.3606... 0.5186 sec/batch\nEpoch: 1/3... Training Step: 19... Training loss: 3.3030... 0.5239 sec/batch\nEpoch: 1/3... Training Step: 20... Training loss: 3.2861... 0.5237 sec/batch\nEpoch: 1/3... Training Step: 21... Training loss: 3.3011... 0.5279 sec/batch\nEpoch: 1/3... Training Step: 22... Training loss: 3.3019... 0.5222 sec/batch\nEpoch: 1/3... Training Step: 23... Training loss: 3.3059... 0.5459 sec/batch\nEpoch: 1/3... Training Step: 24... Training loss: 3.2711... 0.5194 sec/batch\nEpoch: 1/3... Training Step: 25... Training loss: 3.2848... 0.4962 sec/batch\nEpoch: 1/3... Training Step: 26... Training loss: 3.2100... 0.5005 sec/batch\nEpoch: 1/3... Training Step: 27... Training loss: 3.2075... 0.5184 sec/batch\nEpoch: 1/3... Training Step: 28... Training loss: 3.2106... 0.5127 sec/batch\nEpoch: 1/3... Training Step: 29... Training loss: 3.1898... 0.5171 sec/batch\nEpoch: 1/3... Training Step: 30... Training loss: 3.1892... 0.5084 sec/batch\nEpoch: 1/3... Training Step: 31... Training loss: 3.2124... 0.5892 sec/batch\nEpoch: 1/3... Training Step: 32... Training loss: 3.1842... 0.5501 sec/batch\nEpoch: 1/3... Training Step: 33... Training loss: 3.2244... 0.5677 sec/batch\nEpoch: 1/3... Training Step: 34... Training loss: 3.2147... 0.5401 sec/batch\nEpoch: 1/3... Training Step: 35... Training loss: 3.1993... 0.5451 sec/batch\nEpoch: 1/3... Training Step: 36... Training loss: 3.1708... 0.5590 sec/batch\nEpoch: 1/3... Training Step: 37... Training loss: 3.1877... 0.5186 sec/batch\nEpoch: 1/3... Training Step: 38... Training loss: 3.1853... 0.5243 sec/batch\nEpoch: 1/3... Training Step: 39... Training loss: 3.2013... 0.5236 sec/batch\nEpoch: 1/3... Training Step: 40... Training loss: 3.1533... 0.5445 sec/batch\nEpoch: 1/3... Training Step: 41... Training loss: 3.1635... 0.5267 sec/batch\nEpoch: 1/3... Training Step: 42... Training loss: 3.1570... 0.5181 sec/batch\nEpoch: 1/3... Training Step: 43... Training loss: 3.1591... 0.5213 sec/batch\nEpoch: 1/3... Training Step: 44... Training loss: 3.1567... 0.4904 sec/batch\nEpoch: 1/3... Training Step: 45... Training loss: 3.1777... 0.5006 sec/batch\nEpoch: 1/3... Training Step: 46... Training loss: 3.1566... 0.5003 sec/batch\nEpoch: 1/3... Training Step: 47... Training loss: 3.1885... 0.5371 sec/batch\nEpoch: 1/3... Training Step: 48... Training loss: 3.1526... 0.4933 sec/batch\nEpoch: 1/3... Training Step: 49... Training loss: 3.1552... 0.4889 sec/batch\nEpoch: 1/3... Training Step: 50... Training loss: 3.1474... 0.5120 sec/batch\nEpoch: 1/3... Training Step: 51... Training loss: 3.1654... 0.5110 sec/batch\nEpoch: 1/3... Training Step: 52... Training loss: 3.1557... 0.6216 sec/batch\nEpoch: 1/3... Training Step: 53... Training loss: 3.1379... 0.5661 sec/batch\nEpoch: 1/3... Training Step: 54... Training loss: 3.1443... 0.5576 sec/batch\nEpoch: 1/3... Training Step: 55... Training loss: 3.1554... 0.5450 sec/batch\nEpoch: 1/3... Training Step: 56... Training loss: 3.1562... 
0.5678 sec/batch\nEpoch: 1/3... Training Step: 57... Training loss: 3.1650... 0.5110 sec/batch\nEpoch: 1/3... Training Step: 58... Training loss: 3.1539... 0.5172 sec/batch\nEpoch: 1/3... Training Step: 59... Training loss: 3.1464... 0.5337 sec/batch\nEpoch: 1/3... Training Step: 60... Training loss: 3.1779... 0.4916 sec/batch\nEpoch: 1/3... Training Step: 61... Training loss: 3.1566... 0.5383 sec/batch\nEpoch: 1/3... Training Step: 62... Training loss: 3.1512... 0.5486 sec/batch\nEpoch: 1/3... Training Step: 63... Training loss: 3.1556... 0.5007 sec/batch\nEpoch: 1/3... Training Step: 64... Training loss: 3.1490... 0.4947 sec/batch\nEpoch: 1/3... Training Step: 65... Training loss: 3.1620... 0.5029 sec/batch\nEpoch: 1/3... Training Step: 66... Training loss: 3.1578... 0.5005 sec/batch\nEpoch: 1/3... Training Step: 67... Training loss: 3.1370... 0.5006 sec/batch\nEpoch: 1/3... Training Step: 68... Training loss: 3.1490... 0.5024 sec/batch\nEpoch: 1/3... Training Step: 69... Training loss: 3.1683... 0.5135 sec/batch\nEpoch: 1/3... Training Step: 70... Training loss: 3.1493... 0.5114 sec/batch\nEpoch: 1/3... Training Step: 71... Training loss: 3.1756... 0.4883 sec/batch\nEpoch: 1/3... Training Step: 72... Training loss: 3.1229... 0.5147 sec/batch\nEpoch: 1/3... Training Step: 73... Training loss: 3.1519... 0.4983 sec/batch\nEpoch: 1/3... Training Step: 74... Training loss: 3.1618... 0.4996 sec/batch\nEpoch: 1/3... Training Step: 75... Training loss: 3.1456... 0.5302 sec/batch\nEpoch: 1/3... Training Step: 76... Training loss: 3.1426... 0.5340 sec/batch\nEpoch: 1/3... Training Step: 77... Training loss: 3.1276... 0.5409 sec/batch\nEpoch: 1/3... Training Step: 78... Training loss: 3.1403... 0.4985 sec/batch\nEpoch: 1/3... Training Step: 79... Training loss: 3.1484... 0.5438 sec/batch\nEpoch: 1/3... Training Step: 80... Training loss: 3.1362... 0.5561 sec/batch\nEpoch: 1/3... Training Step: 81... Training loss: 3.1681... 0.5364 sec/batch\nEpoch: 1/3... Training Step: 82... Training loss: 3.1552... 0.5297 sec/batch\nEpoch: 1/3... Training Step: 83... Training loss: 3.1361... 0.5191 sec/batch\nEpoch: 1/3... Training Step: 84... Training loss: 3.1269... 0.5168 sec/batch\nEpoch: 1/3... Training Step: 85... Training loss: 3.1488... 0.5412 sec/batch\nEpoch: 1/3... Training Step: 86... Training loss: 3.1427... 0.5024 sec/batch\nEpoch: 1/3... Training Step: 87... Training loss: 3.1502... 0.5116 sec/batch\nEpoch: 1/3... Training Step: 88... Training loss: 3.1386... 0.5426 sec/batch\nEpoch: 1/3... Training Step: 89... Training loss: 3.1538... 0.6118 sec/batch\nEpoch: 1/3... Training Step: 90... Training loss: 3.1288... 0.5395 sec/batch\nEpoch: 1/3... Training Step: 91... Training loss: 3.1141... 0.4964 sec/batch\nEpoch: 1/3... Training Step: 92... Training loss: 3.1599... 0.5533 sec/batch\nEpoch: 1/3... Training Step: 93... Training loss: 3.1249... 0.5003 sec/batch\nEpoch: 1/3... Training Step: 94... Training loss: 3.1232... 0.5325 sec/batch\nEpoch: 1/3... Training Step: 95... Training loss: 3.1634... 0.5278 sec/batch\nEpoch: 1/3... Training Step: 96... Training loss: 3.1254... 0.4982 sec/batch\nEpoch: 1/3... Training Step: 97... Training loss: 3.1521... 0.5255 sec/batch\nEpoch: 1/3... Training Step: 98... Training loss: 3.1198... 0.5004 sec/batch\nEpoch: 1/3... Training Step: 99... Training loss: 3.1246... 0.5244 sec/batch\nEpoch: 1/3... Training Step: 100... Training loss: 3.1324... 0.5448 sec/batch\nEpoch: 1/3... Training Step: 101... Training loss: 3.1177... 0.5271 sec/batch\nEpoch: 1/3... 
Training Step: 102... Training loss: 3.1181... 0.4925 sec/batch\nEpoch: 1/3... Training Step: 103... Training loss: 3.1410... 0.4915 sec/batch\nEpoch: 1/3... Training Step: 104... Training loss: 3.1219... 0.5105 sec/batch\nEpoch: 1/3... Training Step: 105... Training loss: 3.1081... 0.5373 sec/batch\nEpoch: 1/3... Training Step: 106... Training loss: 3.1387... 0.4995 sec/batch\nEpoch: 1/3... Training Step: 107... Training loss: 3.1457... 0.5194 sec/batch\nEpoch: 1/3... Training Step: 108... Training loss: 3.1274... 0.5374 sec/batch\nEpoch: 1/3... Training Step: 109... Training loss: 3.1236... 0.5314 sec/batch\nEpoch: 1/3... Training Step: 110... Training loss: 3.1242... 0.5183 sec/batch\nEpoch: 1/3... Training Step: 111... Training loss: 3.1244... 0.4932 sec/batch\nEpoch: 1/3... Training Step: 112... Training loss: 3.1236... 0.4828 sec/batch\nEpoch: 1/3... Training Step: 113... Training loss: 3.1184... 0.5034 sec/batch\nEpoch: 1/3... Training Step: 114... Training loss: 3.1179... 0.5300 sec/batch\nEpoch: 1/3... Training Step: 115... Training loss: 3.1441... 0.5045 sec/batch\nEpoch: 1/3... Training Step: 116... Training loss: 3.1340... 0.5000 sec/batch\nEpoch: 1/3... Training Step: 117... Training loss: 3.1128... 0.5441 sec/batch\nEpoch: 1/3... Training Step: 118... Training loss: 3.1239... 0.5536 sec/batch\nEpoch: 1/3... Training Step: 119... Training loss: 3.1191... 0.5856 sec/batch\nEpoch: 1/3... Training Step: 120... Training loss: 3.1280... 0.5631 sec/batch\nEpoch: 1/3... Training Step: 121... Training loss: 3.0985... 0.5489 sec/batch\nEpoch: 1/3... Training Step: 122... Training loss: 3.1120... 0.5451 sec/batch\nEpoch: 1/3... Training Step: 123... Training loss: 3.1195... 0.5294 sec/batch\nEpoch: 1/3... Training Step: 124... Training loss: 3.1286... 0.5332 sec/batch\nEpoch: 1/3... Training Step: 125... Training loss: 3.1505... 0.5069 sec/batch\nEpoch: 1/3... Training Step: 126... Training loss: 3.1414... 0.5822 sec/batch\nEpoch: 1/3... Training Step: 127... Training loss: 3.1051... 0.5342 sec/batch\nEpoch: 1/3... Training Step: 128... Training loss: 3.1259... 0.5142 sec/batch\nEpoch: 1/3... Training Step: 129... Training loss: 3.1153... 0.5221 sec/batch\nEpoch: 1/3... Training Step: 130... Training loss: 3.1082... 0.4885 sec/batch\nEpoch: 1/3... Training Step: 131... Training loss: 3.1041... 0.5342 sec/batch\nEpoch: 1/3... Training Step: 132... Training loss: 3.1056... 0.5181 sec/batch\nEpoch: 1/3... Training Step: 133... Training loss: 3.1070... 0.5157 sec/batch\nEpoch: 1/3... Training Step: 134... Training loss: 3.0994... 0.5300 sec/batch\nEpoch: 1/3... Training Step: 135... Training loss: 3.1358... 0.5793 sec/batch\nEpoch: 1/3... Training Step: 136... Training loss: 3.1339... 0.5597 sec/batch\nEpoch: 1/3... Training Step: 137... Training loss: 3.1131... 0.5309 sec/batch\nEpoch: 1/3... Training Step: 138... Training loss: 3.0987... 0.5214 sec/batch\nEpoch: 1/3... Training Step: 139... Training loss: 3.1037... 0.5131 sec/batch\nEpoch: 1/3... Training Step: 140... Training loss: 3.1314... 0.5213 sec/batch\nEpoch: 1/3... Training Step: 141... Training loss: 3.0994... 0.5107 sec/batch\nEpoch: 1/3... Training Step: 142... Training loss: 3.0943... 0.5004 sec/batch\nEpoch: 1/3... Training Step: 143... Training loss: 3.1105... 0.5494 sec/batch\nEpoch: 1/3... Training Step: 144... Training loss: 3.0922... 0.5079 sec/batch\nEpoch: 1/3... Training Step: 145... Training loss: 3.1159... 0.5191 sec/batch\nEpoch: 1/3... Training Step: 146... Training loss: 3.1074... 0.5063 sec/batch\nEpoch: 1/3... 
Training Step: 147... Training loss: 3.1010... 0.5248 sec/batch\nEpoch: 1/3... Training Step: 148... Training loss: 3.1088... 0.5309 sec/batch\nEpoch: 1/3... Training Step: 149... Training loss: 3.1266... 0.5173 sec/batch\nEpoch: 1/3... Training Step: 150... Training loss: 3.1193... 0.5284 sec/batch\nEpoch: 1/3... Training Step: 151... Training loss: 3.1020... 0.5272 sec/batch\nEpoch: 1/3... Training Step: 152... Training loss: 3.1155... 0.5051 sec/batch\nEpoch: 1/3... Training Step: 153... Training loss: 3.1172... 0.5138 sec/batch\nEpoch: 1/3... Training Step: 154... Training loss: 3.1014... 0.5028 sec/batch\nEpoch: 1/3... Training Step: 155... Training loss: 3.0989... 0.5169 sec/batch\nEpoch: 1/3... Training Step: 156... Training loss: 3.0677... 0.5012 sec/batch\nEpoch: 1/3... Training Step: 157... Training loss: 3.0917... 0.5158 sec/batch\nEpoch: 1/3... Training Step: 158... Training loss: 3.0753... 0.5143 sec/batch\nEpoch: 1/3... Training Step: 159... Training loss: 3.0880... 0.5069 sec/batch\nEpoch: 1/3... Training Step: 160... Training loss: 3.0859... 0.4973 sec/batch\nEpoch: 1/3... Training Step: 161... Training loss: 3.0836... 0.5073 sec/batch\nEpoch: 1/3... Training Step: 162... Training loss: 3.0660... 0.5096 sec/batch\nEpoch: 1/3... Training Step: 163... Training loss: 3.0575... 0.5172 sec/batch\nEpoch: 1/3... Training Step: 164... Training loss: 3.0516... 0.5004 sec/batch\nEpoch: 1/3... Training Step: 165... Training loss: 3.0689... 0.5111 sec/batch\nEpoch: 1/3... Training Step: 166... Training loss: 3.0731... 0.5052 sec/batch\nEpoch: 1/3... Training Step: 167... Training loss: 3.0922... 0.5053 sec/batch\nEpoch: 1/3... Training Step: 168... Training loss: 3.0509... 0.4946 sec/batch\nEpoch: 1/3... Training Step: 169... Training loss: 3.0757... 0.4999 sec/batch\nEpoch: 1/3... Training Step: 170... Training loss: 3.0792... 0.5000 sec/batch\nEpoch: 1/3... Training Step: 171... Training loss: 3.0632... 0.4966 sec/batch\nEpoch: 1/3... Training Step: 172... Training loss: 3.0606... 0.5202 sec/batch\nEpoch: 1/3... Training Step: 173... Training loss: 3.0520... 0.5010 sec/batch\nEpoch: 1/3... Training Step: 174... Training loss: 3.0413... 0.4990 sec/batch\nEpoch: 1/3... Training Step: 175... Training loss: 3.0465... 0.5187 sec/batch\nEpoch: 1/3... Training Step: 176... Training loss: 3.0115... 0.5035 sec/batch\nEpoch: 1/3... Training Step: 177... Training loss: 3.0234... 0.4990 sec/batch\nEpoch: 1/3... Training Step: 178... Training loss: 3.0479... 0.4998 sec/batch\nEpoch: 1/3... Training Step: 179... Training loss: 3.0261... 0.5000 sec/batch\nEpoch: 1/3... Training Step: 180... Training loss: 3.0382... 0.5065 sec/batch\nEpoch: 1/3... Training Step: 181... Training loss: 3.0056... 0.5268 sec/batch\nEpoch: 1/3... Training Step: 182... Training loss: 3.0226... 0.5443 sec/batch\nEpoch: 1/3... Training Step: 183... Training loss: 3.0241... 0.5239 sec/batch\nEpoch: 1/3... Training Step: 184... Training loss: 3.0322... 0.5027 sec/batch\nEpoch: 1/3... Training Step: 185... Training loss: 3.0104... 0.5287 sec/batch\nEpoch: 1/3... Training Step: 186... Training loss: 3.0093... 0.5373 sec/batch\nEpoch: 1/3... Training Step: 187... Training loss: 2.9884... 0.4952 sec/batch\nEpoch: 1/3... Training Step: 188... Training loss: 3.0011... 0.5022 sec/batch\nEpoch: 1/3... Training Step: 189... Training loss: 3.0025... 0.5025 sec/batch\nEpoch: 1/3... Training Step: 190... Training loss: 2.9902... 0.5002 sec/batch\nEpoch: 1/3... Training Step: 191... Training loss: 2.9750... 0.5165 sec/batch\nEpoch: 1/3... 
Training Step: 192... Training loss: 2.9817... 0.5003 sec/batch\nEpoch: 1/3... Training Step: 193... Training loss: 2.9633... 0.5155 sec/batch\nEpoch: 1/3... Training Step: 194... Training loss: 2.9843... 0.5109 sec/batch\nEpoch: 1/3... Training Step: 195... Training loss: 2.9262... 0.4889 sec/batch\nEpoch: 1/3... Training Step: 196... Training loss: 2.9407... 0.5030 sec/batch\nEpoch: 1/3... Training Step: 197... Training loss: 2.9243... 0.4989 sec/batch\nEpoch: 1/3... Training Step: 198... Training loss: 2.9258... 0.5188 sec/batch\nEpoch: 1/3... Training Step: 199... Training loss: 2.9501... 0.5136 sec/batch\nEpoch: 1/3... Training Step: 200... Training loss: 2.9362... 0.5001 sec/batch\nEpoch: 1/3... Training Step: 201... Training loss: 2.9081... 0.5167 sec/batch\nEpoch: 1/3... Training Step: 202... Training loss: 2.9295... 0.5158 sec/batch\nEpoch: 1/3... Training Step: 203... Training loss: 2.9601... 0.5142 sec/batch\nEpoch: 1/3... Training Step: 204... Training loss: 2.9409... 0.5321 sec/batch\nEpoch: 1/3... Training Step: 205... Training loss: 2.9040... 0.5576 sec/batch\nEpoch: 1/3... Training Step: 206... Training loss: 2.8826... 0.5224 sec/batch\nEpoch: 1/3... Training Step: 207... Training loss: 2.8515... 0.5209 sec/batch\nEpoch: 1/3... Training Step: 208... Training loss: 2.8641... 0.5004 sec/batch\nEpoch: 1/3... Training Step: 209... Training loss: 2.8108... 0.5354 sec/batch\nEpoch: 1/3... Training Step: 210... Training loss: 2.8172... 0.5330 sec/batch\nEpoch: 1/3... Training Step: 211... Training loss: 2.8143... 0.5537 sec/batch\nEpoch: 1/3... Training Step: 212... Training loss: 2.8492... 0.5153 sec/batch\nEpoch: 1/3... Training Step: 213... Training loss: 2.8509... 0.5006 sec/batch\nEpoch: 1/3... Training Step: 214... Training loss: 2.7834... 0.5235 sec/batch\nEpoch: 1/3... Training Step: 215... Training loss: 2.7728... 0.5459 sec/batch\nEpoch: 1/3... Training Step: 216... Training loss: 2.7916... 0.5003 sec/batch\nEpoch: 1/3... Training Step: 217... Training loss: 2.7676... 0.5529 sec/batch\nEpoch: 1/3... Training Step: 218... Training loss: 2.7732... 0.5198 sec/batch\nEpoch: 1/3... Training Step: 219... Training loss: 2.7472... 0.5414 sec/batch\nEpoch: 1/3... Training Step: 220... Training loss: 2.7525... 0.5472 sec/batch\nEpoch: 1/3... Training Step: 221... Training loss: 2.7333... 0.5251 sec/batch\nEpoch: 1/3... Training Step: 222... Training loss: 2.7393... 0.5273 sec/batch\nEpoch: 1/3... Training Step: 223... Training loss: 2.7280... 0.5225 sec/batch\nEpoch: 1/3... Training Step: 224... Training loss: 2.7421... 0.5381 sec/batch\nEpoch: 1/3... Training Step: 225... Training loss: 2.7122... 0.5489 sec/batch\nEpoch: 1/3... Training Step: 226... Training loss: 2.6896... 0.5211 sec/batch\nEpoch: 1/3... Training Step: 227... Training loss: 2.6976... 0.5634 sec/batch\nEpoch: 1/3... Training Step: 228... Training loss: 2.7139... 0.5165 sec/batch\nEpoch: 1/3... Training Step: 229... Training loss: 2.7311... 0.5877 sec/batch\nEpoch: 1/3... Training Step: 230... Training loss: 2.7034... 0.5263 sec/batch\nEpoch: 1/3... Training Step: 231... Training loss: 2.6705... 0.5704 sec/batch\nEpoch: 1/3... Training Step: 232... Training loss: 2.7066... 0.5532 sec/batch\nEpoch: 1/3... Training Step: 233... Training loss: 2.6796... 0.5245 sec/batch\nEpoch: 1/3... Training Step: 234... Training loss: 2.6540... 0.5021 sec/batch\nEpoch: 1/3... Training Step: 235... Training loss: 2.6614... 0.5168 sec/batch\nEpoch: 1/3... Training Step: 236... Training loss: 2.6449... 0.5177 sec/batch\nEpoch: 1/3... 
Training Step: 237... Training loss: 2.6534... 0.5253 sec/batch\nEpoch: 1/3... Training Step: 238... Training loss: 2.6438... 0.5362 sec/batch\nEpoch: 1/3... Training Step: 239... Training loss: 2.6266... 0.5714 sec/batch\nEpoch: 1/3... Training Step: 240... Training loss: 2.6088... 0.5194 sec/batch\nEpoch: 1/3... Training Step: 241... Training loss: 2.6285... 0.5011 sec/batch\nEpoch: 1/3... Training Step: 242... Training loss: 2.6038... 0.5217 sec/batch\nEpoch: 2/3... Training Step: 243... Training loss: 2.6573... 0.4987 sec/batch\nEpoch: 2/3... Training Step: 244... Training loss: 2.5916... 0.5150 sec/batch\nEpoch: 2/3... Training Step: 245... Training loss: 2.5477... 0.4984 sec/batch\nEpoch: 2/3... Training Step: 246... Training loss: 2.5658... 0.5082 sec/batch\nEpoch: 2/3... Training Step: 247... Training loss: 2.5518... 0.4960 sec/batch\nEpoch: 2/3... Training Step: 248... Training loss: 2.5825... 0.5165 sec/batch\nEpoch: 2/3... Training Step: 249... Training loss: 2.5567... 0.5069 sec/batch\nEpoch: 2/3... Training Step: 250... Training loss: 2.5663... 0.4926 sec/batch\nEpoch: 2/3... Training Step: 251... Training loss: 2.5484... 0.4982 sec/batch\nEpoch: 2/3... Training Step: 252... Training loss: 2.5474... 0.5027 sec/batch\nEpoch: 2/3... Training Step: 253... Training loss: 2.5844... 0.5009 sec/batch\nEpoch: 2/3... Training Step: 254... Training loss: 2.5271... 0.5787 sec/batch\nEpoch: 2/3... Training Step: 255... Training loss: 2.5323... 0.5553 sec/batch\nEpoch: 2/3... Training Step: 256... Training loss: 2.5198... 0.5342 sec/batch\nEpoch: 2/3... Training Step: 257... Training loss: 2.5359... 0.5010 sec/batch\nEpoch: 2/3... Training Step: 258... Training loss: 2.5187... 0.5044 sec/batch\nEpoch: 2/3... Training Step: 259... Training loss: 2.5017... 0.5144 sec/batch\nEpoch: 2/3... Training Step: 260... Training loss: 2.5158... 0.5222 sec/batch\nEpoch: 2/3... Training Step: 261... Training loss: 2.4998... 0.5348 sec/batch\nEpoch: 2/3... Training Step: 262... Training loss: 2.4957... 0.5350 sec/batch\nEpoch: 2/3... Training Step: 263... Training loss: 2.4719... 0.5418 sec/batch\nEpoch: 2/3... Training Step: 264... Training loss: 2.4987... 0.8725 sec/batch\nEpoch: 2/3... Training Step: 265... Training loss: 2.4917... 0.6263 sec/batch\nEpoch: 2/3... Training Step: 266... Training loss: 2.5040... 0.5268 sec/batch\nEpoch: 2/3... Training Step: 267... Training loss: 2.5027... 0.5219 sec/batch\nEpoch: 2/3... Training Step: 268... Training loss: 2.4636... 0.5528 sec/batch\nEpoch: 2/3... Training Step: 269... Training loss: 2.4655... 0.5623 sec/batch\nEpoch: 2/3... Training Step: 270... Training loss: 2.4880... 0.6635 sec/batch\nEpoch: 2/3... Training Step: 271... Training loss: 2.5139... 0.5658 sec/batch\nEpoch: 2/3... Training Step: 272... Training loss: 2.4816... 0.5540 sec/batch\nEpoch: 2/3... Training Step: 273... Training loss: 2.4702... 0.5753 sec/batch\nEpoch: 2/3... Training Step: 274... Training loss: 2.4756... 0.5352 sec/batch\nEpoch: 2/3... Training Step: 275... Training loss: 2.4949... 0.5213 sec/batch\nEpoch: 2/3... Training Step: 276... Training loss: 2.5050... 0.5301 sec/batch\nEpoch: 2/3... Training Step: 277... Training loss: 2.4932... 0.5286 sec/batch\nEpoch: 2/3... Training Step: 278... Training loss: 2.4426... 0.5850 sec/batch\nEpoch: 2/3... Training Step: 279... Training loss: 2.4636... 0.5756 sec/batch\nEpoch: 2/3... Training Step: 280... Training loss: 2.4408... 0.5385 sec/batch\nEpoch: 2/3... Training Step: 281... Training loss: 2.4649... 0.5248 sec/batch\nEpoch: 2/3... 
Training Step: 282... Training loss: 2.4381... 0.5635 sec/batch\nEpoch: 2/3... Training Step: 283... Training loss: 2.4357... 0.5165 sec/batch\nEpoch: 2/3... Training Step: 284... Training loss: 2.4177... 0.5651 sec/batch\nEpoch: 2/3... Training Step: 285... Training loss: 2.4532... 0.5782 sec/batch\nEpoch: 2/3... Training Step: 286... Training loss: 2.4276... 0.6456 sec/batch\nEpoch: 2/3... Training Step: 287... Training loss: 2.4419... 0.6069 sec/batch\nEpoch: 2/3... Training Step: 288... Training loss: 2.4172... 0.5552 sec/batch\nEpoch: 2/3... Training Step: 289... Training loss: 2.4218... 0.5303 sec/batch\nEpoch: 2/3... Training Step: 290... Training loss: 2.4232... 0.5356 sec/batch\nEpoch: 2/3... Training Step: 291... Training loss: 2.4210... 0.5302 sec/batch\nEpoch: 2/3... Training Step: 292... Training loss: 2.4193... 0.5057 sec/batch\nEpoch: 2/3... Training Step: 293... Training loss: 2.4354... 0.5308 sec/batch\nEpoch: 2/3... Training Step: 294... Training loss: 2.4179... 0.5288 sec/batch\nEpoch: 2/3... Training Step: 295... Training loss: 2.4059... 0.5281 sec/batch\nEpoch: 2/3... Training Step: 296... Training loss: 2.4532... 0.6025 sec/batch\nEpoch: 2/3... Training Step: 297... Training loss: 2.3959... 0.5718 sec/batch\nEpoch: 2/3... Training Step: 298... Training loss: 2.3726... 0.5160 sec/batch\nEpoch: 2/3... Training Step: 299... Training loss: 2.4056... 0.5808 sec/batch\nEpoch: 2/3... Training Step: 300... Training loss: 2.3562... 0.5973 sec/batch\nEpoch: 2/3... Training Step: 301... Training loss: 2.3654... 0.5500 sec/batch\nEpoch: 2/3... Training Step: 302... Training loss: 2.3741... 0.5429 sec/batch\nEpoch: 2/3... Training Step: 303... Training loss: 2.3708... 0.5238 sec/batch\nEpoch: 2/3... Training Step: 304... Training loss: 2.3708... 0.5634 sec/batch\nEpoch: 2/3... Training Step: 305... Training loss: 2.3956... 0.5416 sec/batch\nEpoch: 2/3... Training Step: 306... Training loss: 2.3539... 0.5331 sec/batch\nEpoch: 2/3... Training Step: 307... Training loss: 2.3797... 0.5338 sec/batch\nEpoch: 2/3... Training Step: 308... Training loss: 2.3650... 0.5250 sec/batch\nEpoch: 2/3... Training Step: 309... Training loss: 2.3444... 0.5268 sec/batch\nEpoch: 2/3... Training Step: 310... Training loss: 2.3475... 0.5318 sec/batch\nEpoch: 2/3... Training Step: 311... Training loss: 2.3727... 0.5516 sec/batch\nEpoch: 2/3... Training Step: 312... Training loss: 2.3526... 0.5314 sec/batch\nEpoch: 2/3... Training Step: 313... Training loss: 2.3594... 0.5710 sec/batch\nEpoch: 2/3... Training Step: 314... Training loss: 2.3181... 0.5563 sec/batch\nEpoch: 2/3... Training Step: 315... Training loss: 2.3397... 0.5707 sec/batch\nEpoch: 2/3... Training Step: 316... Training loss: 2.3278... 0.5729 sec/batch\nEpoch: 2/3... Training Step: 317... Training loss: 2.3207... 0.5380 sec/batch\nEpoch: 2/3... Training Step: 318... Training loss: 2.3247... 0.5191 sec/batch\nEpoch: 2/3... Training Step: 319... Training loss: 2.3338... 0.7567 sec/batch\nEpoch: 2/3... Training Step: 320... Training loss: 2.3206... 0.8630 sec/batch\nEpoch: 2/3... Training Step: 321... Training loss: 2.3348... 1.3790 sec/batch\nEpoch: 2/3... Training Step: 322... Training loss: 2.3178... 0.7209 sec/batch\nEpoch: 2/3... Training Step: 323... Training loss: 2.3502... 0.5994 sec/batch\nEpoch: 2/3... Training Step: 324... Training loss: 2.3673... 0.6223 sec/batch\nEpoch: 2/3... Training Step: 325... Training loss: 2.3271... 0.6655 sec/batch\nEpoch: 2/3... Training Step: 326... Training loss: 2.3190... 0.6613 sec/batch\nEpoch: 2/3... 
Training Step: 327... Training loss: 2.3121... 0.5743 sec/batch\nEpoch: 2/3... Training Step: 328... Training loss: 2.3016... 0.5944 sec/batch\nEpoch: 2/3... Training Step: 329... Training loss: 2.3538... 0.5809 sec/batch\nEpoch: 2/3... Training Step: 330... Training loss: 2.3068... 0.6070 sec/batch\nEpoch: 2/3... Training Step: 331... Training loss: 2.3426... 0.5959 sec/batch\nEpoch: 2/3... Training Step: 332... Training loss: 2.2900... 0.5943 sec/batch\nEpoch: 2/3... Training Step: 333... Training loss: 2.2629... 0.5865 sec/batch\nEpoch: 2/3... Training Step: 334... Training loss: 2.3039... 0.5832 sec/batch\nEpoch: 2/3... Training Step: 335... Training loss: 2.2826... 0.5822 sec/batch\nEpoch: 2/3... Training Step: 336... Training loss: 2.2933... 0.8881 sec/batch\nEpoch: 2/3... Training Step: 337... Training loss: 2.3136... 0.5998 sec/batch\nEpoch: 2/3... Training Step: 338... Training loss: 2.2825... 0.5505 sec/batch\nEpoch: 2/3... Training Step: 339... Training loss: 2.3044... 0.5704 sec/batch\nEpoch: 2/3... Training Step: 340... Training loss: 2.2478... 0.6182 sec/batch\nEpoch: 2/3... Training Step: 341... Training loss: 2.2687... 0.6129 sec/batch\nEpoch: 2/3... Training Step: 342... Training loss: 2.2556... 0.5623 sec/batch\nEpoch: 2/3... Training Step: 343... Training loss: 2.2510... 0.5510 sec/batch\nEpoch: 2/3... Training Step: 344... Training loss: 2.2570... 0.5276 sec/batch\nEpoch: 2/3... Training Step: 345... Training loss: 2.2692... 0.5238 sec/batch\nEpoch: 2/3... Training Step: 346... Training loss: 2.2672... 0.5633 sec/batch\nEpoch: 2/3... Training Step: 347... Training loss: 2.2261... 0.6164 sec/batch\nEpoch: 2/3... Training Step: 348... Training loss: 2.2586... 1.5226 sec/batch\nEpoch: 2/3... Training Step: 349... Training loss: 2.2799... 0.7989 sec/batch\nEpoch: 2/3... Training Step: 350... Training loss: 2.2671... 0.6831 sec/batch\nEpoch: 2/3... Training Step: 351... Training loss: 2.2187... 0.6488 sec/batch\nEpoch: 2/3... Training Step: 352... Training loss: 2.2438... 0.6041 sec/batch\nEpoch: 2/3... Training Step: 353... Training loss: 2.2259... 0.5381 sec/batch\nEpoch: 2/3... Training Step: 354... Training loss: 2.2514... 0.5396 sec/batch\nEpoch: 2/3... Training Step: 355... Training loss: 2.2320... 0.6262 sec/batch\nEpoch: 2/3... Training Step: 356... Training loss: 2.2345... 0.5793 sec/batch\nEpoch: 2/3... Training Step: 357... Training loss: 2.2616... 0.5617 sec/batch\nEpoch: 2/3... Training Step: 358... Training loss: 2.2356... 0.5320 sec/batch\nEpoch: 2/3... Training Step: 359... Training loss: 2.2335... 0.5832 sec/batch\nEpoch: 2/3... Training Step: 360... Training loss: 2.2128... 0.6264 sec/batch\nEpoch: 2/3... Training Step: 361... Training loss: 2.2365... 0.6484 sec/batch\nEpoch: 2/3... Training Step: 362... Training loss: 2.2502... 0.5969 sec/batch\nEpoch: 2/3... Training Step: 363... Training loss: 2.2046... 0.5561 sec/batch\nEpoch: 2/3... Training Step: 364... Training loss: 2.2006... 0.5567 sec/batch\nEpoch: 2/3... Training Step: 365... Training loss: 2.2559... 0.5442 sec/batch\nEpoch: 2/3... Training Step: 366... Training loss: 2.2188... 0.5630 sec/batch\nEpoch: 2/3... Training Step: 367... Training loss: 2.2290... 0.5454 sec/batch\nEpoch: 2/3... Training Step: 368... Training loss: 2.2385... 0.5536 sec/batch\nEpoch: 2/3... Training Step: 369... Training loss: 2.1787... 0.5250 sec/batch\nEpoch: 2/3... Training Step: 370... Training loss: 2.2250... 0.5592 sec/batch\nEpoch: 2/3... Training Step: 371... Training loss: 2.1939... 0.5329 sec/batch\nEpoch: 2/3... 
Training Step: 372... Training loss: 2.1905... 0.6303 sec/batch\nEpoch: 2/3... Training Step: 373... Training loss: 2.1891... 0.5889 sec/batch\nEpoch: 2/3... Training Step: 374... Training loss: 2.1968... 0.5413 sec/batch\nEpoch: 2/3... Training Step: 375... Training loss: 2.1700... 0.5418 sec/batch\nEpoch: 2/3... Training Step: 376... Training loss: 2.1881... 0.5665 sec/batch\nEpoch: 2/3... Training Step: 377... Training loss: 2.1693... 0.6130 sec/batch\nEpoch: 2/3... Training Step: 378... Training loss: 2.2040... 0.5311 sec/batch\nEpoch: 2/3... Training Step: 379... Training loss: 2.1461... 0.5158 sec/batch\nEpoch: 2/3... Training Step: 380... Training loss: 2.1436... 0.5325 sec/batch\nEpoch: 2/3... Training Step: 381... Training loss: 2.1492... 0.5185 sec/batch\nEpoch: 2/3... Training Step: 382... Training loss: 2.1574... 0.5402 sec/batch\nEpoch: 2/3... Training Step: 383... Training loss: 2.1617... 0.5286 sec/batch\nEpoch: 2/3... Training Step: 384... Training loss: 2.1457... 0.5558 sec/batch\nEpoch: 2/3... Training Step: 385... Training loss: 2.2023... 0.5674 sec/batch\nEpoch: 2/3... Training Step: 386... Training loss: 2.1515... 0.5774 sec/batch\nEpoch: 2/3... Training Step: 387... Training loss: 2.1884... 0.5430 sec/batch\nEpoch: 2/3... Training Step: 388... Training loss: 2.1541... 0.5644 sec/batch\nEpoch: 2/3... Training Step: 389... Training loss: 2.1331... 0.5649 sec/batch\nEpoch: 2/3... Training Step: 390... Training loss: 2.1630... 0.5706 sec/batch\nEpoch: 2/3... Training Step: 391... Training loss: 2.1415... 0.5470 sec/batch\nEpoch: 2/3... Training Step: 392... Training loss: 2.1857... 0.6315 sec/batch\nEpoch: 2/3... Training Step: 393... Training loss: 2.1743... 0.6039 sec/batch\nEpoch: 2/3... Training Step: 394... Training loss: 2.1630... 0.5713 sec/batch\nEpoch: 2/3... Training Step: 395... Training loss: 2.1516... 0.5173 sec/batch\nEpoch: 2/3... Training Step: 396... Training loss: 2.1406... 0.5330 sec/batch\nEpoch: 2/3... Training Step: 397... Training loss: 2.1348... 0.5364 sec/batch\nEpoch: 2/3... Training Step: 398... Training loss: 2.1101... 0.5166 sec/batch\nEpoch: 2/3... Training Step: 399... Training loss: 2.1272... 0.5315 sec/batch\nEpoch: 2/3... Training Step: 400... Training loss: 2.1313... 0.5168 sec/batch\nEpoch: 2/3... Training Step: 401... Training loss: 2.1428... 0.5077 sec/batch\nEpoch: 2/3... Training Step: 402... Training loss: 2.1242... 0.5076 sec/batch\nEpoch: 2/3... Training Step: 403... Training loss: 2.1104... 0.5166 sec/batch\nEpoch: 2/3... Training Step: 404... Training loss: 2.1432... 0.5225 sec/batch\nEpoch: 2/3... Training Step: 405... Training loss: 2.1205... 0.5155 sec/batch\nEpoch: 2/3... Training Step: 406... Training loss: 2.1056... 0.5007 sec/batch\nEpoch: 2/3... Training Step: 407... Training loss: 2.1128... 0.5159 sec/batch\nEpoch: 2/3... Training Step: 408... Training loss: 2.1303... 0.5154 sec/batch\nEpoch: 2/3... Training Step: 409... Training loss: 2.1528... 0.5224 sec/batch\nEpoch: 2/3... Training Step: 410... Training loss: 2.1102... 0.5323 sec/batch\nEpoch: 2/3... Training Step: 411... Training loss: 2.1207... 0.5552 sec/batch\nEpoch: 2/3... Training Step: 412... Training loss: 2.1493... 0.5519 sec/batch\nEpoch: 2/3... Training Step: 413... Training loss: 2.0765... 0.5261 sec/batch\nEpoch: 2/3... Training Step: 414... Training loss: 2.1198... 0.5295 sec/batch\nEpoch: 2/3... Training Step: 415... Training loss: 2.1163... 0.5361 sec/batch\nEpoch: 2/3... Training Step: 416... Training loss: 2.0924... 0.5535 sec/batch\nEpoch: 2/3... 
Training Step: 417... Training loss: 2.1032... 0.5324 sec/batch\nEpoch: 2/3... Training Step: 418... Training loss: 2.0813... 0.5695 sec/batch\nEpoch: 2/3... Training Step: 419... Training loss: 2.1038... 0.5372 sec/batch\nEpoch: 2/3... Training Step: 420... Training loss: 2.0747... 0.5614 sec/batch\nEpoch: 2/3... Training Step: 421... Training loss: 2.0939... 0.5168 sec/batch\nEpoch: 2/3... Training Step: 422... Training loss: 2.0893... 0.5299 sec/batch\nEpoch: 2/3... Training Step: 423... Training loss: 2.0819... 0.5185 sec/batch\nEpoch: 2/3... Training Step: 424... Training loss: 2.0860... 0.5323 sec/batch\nEpoch: 2/3... Training Step: 425... Training loss: 2.0971... 0.5053 sec/batch\nEpoch: 2/3... Training Step: 426... Training loss: 2.1053... 0.5287 sec/batch\nEpoch: 2/3... Training Step: 427... Training loss: 2.1047... 0.5173 sec/batch\nEpoch: 2/3... Training Step: 428... Training loss: 2.0757... 0.5203 sec/batch\nEpoch: 2/3... Training Step: 429... Training loss: 2.0848... 0.5171 sec/batch\nEpoch: 2/3... Training Step: 430... Training loss: 2.1258... 0.5576 sec/batch\nEpoch: 2/3... Training Step: 431... Training loss: 2.0898... 0.5229 sec/batch\nEpoch: 2/3... Training Step: 432... Training loss: 2.1237... 0.5070 sec/batch\nEpoch: 2/3... Training Step: 433... Training loss: 2.0806... 0.5316 sec/batch\nEpoch: 2/3... Training Step: 434... Training loss: 2.0727... 0.5376 sec/batch\nEpoch: 2/3... Training Step: 435... Training loss: 2.0607... 0.5163 sec/batch\nEpoch: 2/3... Training Step: 436... Training loss: 2.0716... 0.5409 sec/batch\nEpoch: 2/3... Training Step: 437... Training loss: 2.0499... 0.5789 sec/batch\nEpoch: 2/3... Training Step: 438... Training loss: 2.0704... 0.5536 sec/batch\nEpoch: 2/3... Training Step: 439... Training loss: 2.0768... 0.5320 sec/batch\nEpoch: 2/3... Training Step: 440... Training loss: 2.0657... 0.5384 sec/batch\nEpoch: 2/3... Training Step: 441... Training loss: 2.1020... 0.5608 sec/batch\nEpoch: 2/3... Training Step: 442... Training loss: 2.1037... 0.6280 sec/batch\nEpoch: 2/3... Training Step: 443... Training loss: 2.0824... 0.5259 sec/batch\nEpoch: 2/3... Training Step: 444... Training loss: 2.1136... 0.5053 sec/batch\nEpoch: 2/3... Training Step: 445... Training loss: 2.1537... 0.5317 sec/batch\nEpoch: 2/3... Training Step: 446... Training loss: 2.1541... 0.5186 sec/batch\nEpoch: 2/3... Training Step: 447... Training loss: 2.1027... 0.5304 sec/batch\nEpoch: 2/3... Training Step: 448... Training loss: 2.0515... 0.5204 sec/batch\nEpoch: 2/3... Training Step: 449... Training loss: 2.0443... 0.5599 sec/batch\nEpoch: 2/3... Training Step: 450... Training loss: 2.0504... 0.5807 sec/batch\nEpoch: 2/3... Training Step: 451... Training loss: 2.0040... 0.5536 sec/batch\nEpoch: 2/3... Training Step: 452... Training loss: 2.0325... 0.5472 sec/batch\nEpoch: 2/3... Training Step: 453... Training loss: 2.0352... 0.5325 sec/batch\nEpoch: 2/3... Training Step: 454... Training loss: 2.0822... 0.5271 sec/batch\nEpoch: 2/3... Training Step: 455... Training loss: 2.0987... 0.5196 sec/batch\nEpoch: 2/3... Training Step: 456... Training loss: 2.0028... 0.5213 sec/batch\nEpoch: 2/3... Training Step: 457... Training loss: 2.0156... 0.5164 sec/batch\nEpoch: 2/3... Training Step: 458... Training loss: 2.0488... 0.5323 sec/batch\nEpoch: 2/3... Training Step: 459... Training loss: 2.0403... 0.5155 sec/batch\nEpoch: 2/3... Training Step: 460... Training loss: 2.0284... 0.5204 sec/batch\nEpoch: 2/3... Training Step: 461... Training loss: 2.0202... 0.5184 sec/batch\nEpoch: 2/3... 
Training Step: 462... Training loss: 2.0291... 0.5315 sec/batch\nEpoch: 2/3... Training Step: 463... Training loss: 1.9876... 0.5322 sec/batch\nEpoch: 2/3... Training Step: 464... Training loss: 2.0663... 0.5377 sec/batch\nEpoch: 2/3... Training Step: 465... Training loss: 2.0348... 0.5322 sec/batch\nEpoch: 2/3... Training Step: 466... Training loss: 2.0283... 0.5161 sec/batch\nEpoch: 2/3... Training Step: 467... Training loss: 2.0191... 0.5376 sec/batch\nEpoch: 2/3... Training Step: 468... Training loss: 2.0212... 0.5299 sec/batch\nEpoch: 2/3... Training Step: 469... Training loss: 2.0403... 0.5321 sec/batch\nEpoch: 2/3... Training Step: 470... Training loss: 2.0316... 0.5206 sec/batch\nEpoch: 2/3... Training Step: 471... Training loss: 2.0727... 0.5338 sec/batch\nEpoch: 2/3... Training Step: 472... Training loss: 2.0291... 0.5377 sec/batch\nEpoch: 2/3... Training Step: 473... Training loss: 2.0153... 0.5239 sec/batch\nEpoch: 2/3... Training Step: 474... Training loss: 2.0460... 0.5332 sec/batch\nEpoch: 2/3... Training Step: 475... Training loss: 2.0189... 0.5194 sec/batch\nEpoch: 2/3... Training Step: 476... Training loss: 2.0089... 0.5343 sec/batch\nEpoch: 2/3... Training Step: 477... Training loss: 2.0217... 0.5327 sec/batch\nEpoch: 2/3... Training Step: 478... Training loss: 1.9985... 0.5251 sec/batch\nEpoch: 2/3... Training Step: 479... Training loss: 2.0191... 0.5187 sec/batch\nEpoch: 2/3... Training Step: 480... Training loss: 2.0477... 0.5198 sec/batch\nEpoch: 2/3... Training Step: 481... Training loss: 2.0033... 0.5161 sec/batch\nEpoch: 2/3... Training Step: 482... Training loss: 1.9717... 0.5219 sec/batch\nEpoch: 2/3... Training Step: 483... Training loss: 2.0184... 0.5413 sec/batch\nEpoch: 2/3... Training Step: 484... Training loss: 1.9607... 0.5210 sec/batch\nEpoch: 3/3... Training Step: 485... Training loss: 2.0466... 0.5328 sec/batch\nEpoch: 3/3... Training Step: 486... Training loss: 1.9957... 0.5208 sec/batch\nEpoch: 3/3... Training Step: 487... Training loss: 1.9402... 0.5302 sec/batch\nEpoch: 3/3... Training Step: 488... Training loss: 1.9465... 0.5385 sec/batch\nEpoch: 3/3... Training Step: 489... Training loss: 1.9554... 0.5155 sec/batch\nEpoch: 3/3... Training Step: 490... Training loss: 2.0180... 0.5133 sec/batch\nEpoch: 3/3... Training Step: 491... Training loss: 1.9810... 0.5407 sec/batch\nEpoch: 3/3... Training Step: 492... Training loss: 2.0053... 0.5129 sec/batch\nEpoch: 3/3... Training Step: 493... Training loss: 1.9819... 0.5352 sec/batch\nEpoch: 3/3... Training Step: 494... Training loss: 1.9702... 0.5774 sec/batch\nEpoch: 3/3... Training Step: 495... Training loss: 2.0152... 0.5380 sec/batch\nEpoch: 3/3... Training Step: 496... Training loss: 1.9679... 0.5779 sec/batch\nEpoch: 3/3... Training Step: 497... Training loss: 1.9693... 0.5910 sec/batch\nEpoch: 3/3... Training Step: 498... Training loss: 1.9437... 0.5742 sec/batch\nEpoch: 3/3... Training Step: 499... Training loss: 1.9693... 0.5507 sec/batch\nEpoch: 3/3... Training Step: 500... Training loss: 1.9879... 0.5325 sec/batch\nEpoch: 3/3... Training Step: 501... Training loss: 1.9627... 0.5412 sec/batch\nEpoch: 3/3... Training Step: 502... Training loss: 1.9588... 0.5390 sec/batch\nEpoch: 3/3... Training Step: 503... Training loss: 1.9501... 0.5454 sec/batch\nEpoch: 3/3... Training Step: 504... Training loss: 1.9396... 0.5475 sec/batch\nEpoch: 3/3... Training Step: 505... Training loss: 1.9307... 0.5832 sec/batch\nEpoch: 3/3... Training Step: 506... Training loss: 1.9543... 0.5487 sec/batch\nEpoch: 3/3... 
Training Step: 507... Training loss: 1.9593... 0.5757 sec/batch\nEpoch: 3/3... Training Step: 508... Training loss: 1.9705... 0.5579 sec/batch\nEpoch: 3/3... Training Step: 509... Training loss: 1.9353... 0.5710 sec/batch\nEpoch: 3/3... Training Step: 510... Training loss: 1.8865... 0.5340 sec/batch\nEpoch: 3/3... Training Step: 511... Training loss: 1.9244... 0.5348 sec/batch\nEpoch: 3/3... Training Step: 512... Training loss: 1.9651... 0.5326 sec/batch\nEpoch: 3/3... Training Step: 513... Training loss: 1.9735... 0.5502 sec/batch\nEpoch: 3/3... Training Step: 514... Training loss: 1.9274... 0.5316 sec/batch\nEpoch: 3/3... Training Step: 515... Training loss: 1.9533... 0.5343 sec/batch\nEpoch: 3/3... Training Step: 516... Training loss: 1.9619... 0.5327 sec/batch\nEpoch: 3/3... Training Step: 517... Training loss: 1.9787... 0.5323 sec/batch\nEpoch: 3/3... Training Step: 518... Training loss: 1.9699... 0.5605 sec/batch\nEpoch: 3/3... Training Step: 519... Training loss: 1.9681... 0.5499 sec/batch\nEpoch: 3/3... Training Step: 520... Training loss: 1.9334... 0.5269 sec/batch\nEpoch: 3/3... Training Step: 521... Training loss: 1.9364... 0.5501 sec/batch\nEpoch: 3/3... Training Step: 522... Training loss: 1.9115... 0.5677 sec/batch\nEpoch: 3/3... Training Step: 523... Training loss: 1.9445... 0.5478 sec/batch\nEpoch: 3/3... Training Step: 524... Training loss: 1.9407... 0.5367 sec/batch\nEpoch: 3/3... Training Step: 525... Training loss: 1.9080... 0.5321 sec/batch\nEpoch: 3/3... Training Step: 526... Training loss: 1.9185... 0.5516 sec/batch\nEpoch: 3/3... Training Step: 527... Training loss: 1.9238... 0.5668 sec/batch\nEpoch: 3/3... Training Step: 528... Training loss: 1.9223... 0.5542 sec/batch\nEpoch: 3/3... Training Step: 529... Training loss: 1.9358... 0.5698 sec/batch\nEpoch: 3/3... Training Step: 530... Training loss: 1.9019... 0.5600 sec/batch\nEpoch: 3/3... Training Step: 531... Training loss: 1.9108... 0.5374 sec/batch\nEpoch: 3/3... Training Step: 532... Training loss: 1.9062... 0.5180 sec/batch\nEpoch: 3/3... Training Step: 533... Training loss: 1.9078... 0.5320 sec/batch\nEpoch: 3/3... Training Step: 534... Training loss: 1.9188... 0.5507 sec/batch\nEpoch: 3/3... Training Step: 535... Training loss: 1.9293... 0.5485 sec/batch\nEpoch: 3/3... Training Step: 536... Training loss: 1.9148... 0.5543 sec/batch\nEpoch: 3/3... Training Step: 537... Training loss: 1.9361... 0.5654 sec/batch\nEpoch: 3/3... Training Step: 538... Training loss: 1.9580... 0.5802 sec/batch\nEpoch: 3/3... Training Step: 539... Training loss: 1.9507... 0.5385 sec/batch\nEpoch: 3/3... Training Step: 540... Training loss: 1.8964... 0.5654 sec/batch\nEpoch: 3/3... Training Step: 541... Training loss: 1.9274... 0.5699 sec/batch\nEpoch: 3/3... Training Step: 542... Training loss: 1.8837... 0.5470 sec/batch\nEpoch: 3/3... Training Step: 543... Training loss: 1.9146... 0.5371 sec/batch\nEpoch: 3/3... Training Step: 544... Training loss: 1.9144... 0.5315 sec/batch\nEpoch: 3/3... Training Step: 545... Training loss: 1.8953... 0.5345 sec/batch\nEpoch: 3/3... Training Step: 546... Training loss: 1.8988... 0.5513 sec/batch\nEpoch: 3/3... Training Step: 547... Training loss: 1.9225... 0.5770 sec/batch\nEpoch: 3/3... Training Step: 548... Training loss: 1.8931... 0.5647 sec/batch\nEpoch: 3/3... Training Step: 549... Training loss: 1.9206... 0.5621 sec/batch\nEpoch: 3/3... Training Step: 550... Training loss: 1.9221... 0.5896 sec/batch\nEpoch: 3/3... Training Step: 551... Training loss: 1.9064... 0.5682 sec/batch\nEpoch: 3/3... 
Training Step: 552... Training loss: 1.9229... 0.5830 sec/batch\nEpoch: 3/3... Training Step: 553... Training loss: 1.8975... 0.6209 sec/batch\nEpoch: 3/3... Training Step: 554... Training loss: 1.9047... 0.6185 sec/batch\nEpoch: 3/3... Training Step: 555... Training loss: 1.8871... 0.5784 sec/batch\nEpoch: 3/3... Training Step: 556... Training loss: 1.8622... 0.5948 sec/batch\nEpoch: 3/3... Training Step: 557... Training loss: 1.9102... 0.5930 sec/batch\nEpoch: 3/3... Training Step: 558... Training loss: 1.8646... 0.5835 sec/batch\nEpoch: 3/3... Training Step: 559... Training loss: 1.8687... 0.5815 sec/batch\nEpoch: 3/3... Training Step: 560... Training loss: 1.8826... 0.5690 sec/batch\nEpoch: 3/3... Training Step: 561... Training loss: 1.8837... 0.5770 sec/batch\nEpoch: 3/3... Training Step: 562... Training loss: 1.8792... 0.5800 sec/batch\nEpoch: 3/3... Training Step: 563... Training loss: 1.8989... 0.6180 sec/batch\nEpoch: 3/3... Training Step: 564... Training loss: 1.9063... 0.5970 sec/batch\nEpoch: 3/3... Training Step: 565... Training loss: 1.9050... 0.5765 sec/batch\nEpoch: 3/3... Training Step: 566... Training loss: 1.9372... 0.6180 sec/batch\nEpoch: 3/3... Training Step: 567... Training loss: 1.8835... 0.5920 sec/batch\nEpoch: 3/3... Training Step: 568... Training loss: 1.8927... 0.5845 sec/batch\nEpoch: 3/3... Training Step: 569... Training loss: 1.8623... 0.6130 sec/batch\nEpoch: 3/3... Training Step: 570... Training loss: 1.8533... 0.5725 sec/batch\nEpoch: 3/3... Training Step: 571... Training loss: 1.9385... 0.6110 sec/batch\nEpoch: 3/3... Training Step: 572... Training loss: 1.8603... 0.6045 sec/batch\nEpoch: 3/3... Training Step: 573... Training loss: 1.9176... 0.5975 sec/batch\nEpoch: 3/3... Training Step: 574... Training loss: 1.8625... 0.7265 sec/batch\nEpoch: 3/3... Training Step: 575... Training loss: 1.8673... 0.7735 sec/batch\nEpoch: 3/3... Training Step: 576... Training loss: 1.8929... 0.7295 sec/batch\nEpoch: 3/3... Training Step: 577... Training loss: 1.8916... 0.8330 sec/batch\nEpoch: 3/3... Training Step: 578... Training loss: 1.8727... 0.7815 sec/batch\nEpoch: 3/3... Training Step: 579... Training loss: 1.9076... 0.7620 sec/batch\nEpoch: 3/3... Training Step: 580... Training loss: 1.8770... 0.5940 sec/batch\nEpoch: 3/3... Training Step: 581... Training loss: 1.9127... 0.5800 sec/batch\nEpoch: 3/3... Training Step: 582... Training loss: 1.8547... 0.5745 sec/batch\nEpoch: 3/3... Training Step: 583... Training loss: 1.8849... 0.5715 sec/batch\nEpoch: 3/3... Training Step: 584... Training loss: 1.8392... 0.5655 sec/batch\nEpoch: 3/3... Training Step: 585... Training loss: 1.8518... 0.5705 sec/batch\nEpoch: 3/3... Training Step: 586... Training loss: 1.8531... 0.5770 sec/batch\nEpoch: 3/3... Training Step: 587... Training loss: 1.8444... 0.5395 sec/batch\nEpoch: 3/3... Training Step: 588... Training loss: 1.8727... 0.5510 sec/batch\nEpoch: 3/3... Training Step: 589... Training loss: 1.8440... 0.5510 sec/batch\nEpoch: 3/3... Training Step: 590... Training loss: 1.8541... 0.5405 sec/batch\nEpoch: 3/3... Training Step: 591... Training loss: 1.9146... 0.5455 sec/batch\nEpoch: 3/3... Training Step: 592... Training loss: 1.8878... 0.5490 sec/batch\nEpoch: 3/3... Training Step: 593... Training loss: 1.8151... 0.5570 sec/batch\nEpoch: 3/3... Training Step: 594... Training loss: 1.8465... 0.5540 sec/batch\nEpoch: 3/3... Training Step: 595... Training loss: 1.8211... 0.5550 sec/batch\nEpoch: 3/3... Training Step: 596... Training loss: 1.8825... 0.5600 sec/batch\nEpoch: 3/3... 
Training Step: 597... Training loss: 1.8370... 0.5675 sec/batch\nEpoch: 3/3... Training Step: 598... Training loss: 1.8625... 0.5407 sec/batch\nEpoch: 3/3... Training Step: 599... Training loss: 1.8895... 0.5650 sec/batch\n" ] ], [ [ "#### Saved checkpoints\n\nRead up on saving and loading checkpoints here: https://www.tensorflow.org/programmers_guide/variables", "_____no_output_____" ] ], [ [ "tf.train.get_checkpoint_state('checkpoints')", "_____no_output_____" ] ], [ [ "## Sampling\n\nNow that the network is trained, we'll can use it to generate new text. The idea is that we pass in a character, then the network will predict the next character. We can use the new one, to predict the next one. And we keep doing this to generate all new text. I also included some functionality to prime the network with some text by passing in a string and building up a state from that.\n\nThe network gives us predictions for each character. To reduce noise and make things a little less random, I'm going to only choose a new character from the top N most likely characters.\n\n", "_____no_output_____" ] ], [ [ "def pick_top_n(preds, vocab_size, top_n=5):\n p = np.squeeze(preds)\n p[np.argsort(p)[:-top_n]] = 0\n p = p / np.sum(p)\n c = np.random.choice(vocab_size, 1, p=p)[0]\n return c", "_____no_output_____" ], [ "def sample(checkpoint, n_samples, lstm_size, vocab_size, prime=\"The \"):\n samples = [c for c in prime]\n model = CharRNN(len(vocab), lstm_size=lstm_size, sampling=True)\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.restore(sess, checkpoint)\n new_state = sess.run(model.initial_state)\n for c in prime:\n x = np.zeros((1, 1))\n x[0,0] = vocab_to_int[c]\n feed = {model.inputs: x,\n model.keep_prob: 1.,\n model.initial_state: new_state}\n preds, new_state = sess.run([model.prediction, model.final_state], \n feed_dict=feed)\n\n c = pick_top_n(preds, len(vocab))\n samples.append(int_to_vocab[c])\n\n for i in range(n_samples):\n x[0,0] = c\n feed = {model.inputs: x,\n model.keep_prob: 1.,\n model.initial_state: new_state}\n preds, new_state = sess.run([model.prediction, model.final_state], \n feed_dict=feed)\n\n c = pick_top_n(preds, len(vocab))\n samples.append(int_to_vocab[c])\n \n return ''.join(samples)", "_____no_output_____" ] ], [ [ "Here, pass in the path to a checkpoint and sample from the network.", "_____no_output_____" ] ], [ [ "tf.train.latest_checkpoint('checkpoints')", "_____no_output_____" ], [ "checkpoint = tf.train.latest_checkpoint('checkpoints')\nsamp = sample(checkpoint, 2000, lstm_size, len(vocab), prime=\"Far\")\nprint(samp)", "_____no_output_____" ], [ "checkpoint = 'checkpoints/i200_l512.ckpt'\nsamp = sample(checkpoint, 1000, lstm_size, len(vocab), prime=\"Far\")\nprint(samp)", "_____no_output_____" ], [ "checkpoint = 'checkpoints/i600_l512.ckpt'\nsamp = sample(checkpoint, 1000, lstm_size, len(vocab), prime=\"Far\")\nprint(samp)", "_____no_output_____" ], [ "checkpoint = 'checkpoints/i1200_l512.ckpt'\nsamp = sample(checkpoint, 1000, lstm_size, len(vocab), prime=\"Far\")\nprint(samp)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
c53061855d53424dab8688540c3e3d23bd692864
128,739
ipynb
Jupyter Notebook
Workshop_Coding/The pandas library.ipynb
chrisbilliard/python
462aeb5230bc34a66c87079f2ec0e216e1c9074c
[ "MIT" ]
2
2018-04-23T11:01:29.000Z
2018-04-25T12:51:06.000Z
Workshop_Coding/The pandas library.ipynb
CreateCodeLearn/data-science-track
ad679fc35a987264d9bd4974b74dc6a4f4cb923f
[ "MIT" ]
null
null
null
Workshop_Coding/The pandas library.ipynb
CreateCodeLearn/data-science-track
ad679fc35a987264d9bd4974b74dc6a4f4cb923f
[ "MIT" ]
null
null
null
35.475062
13,784
0.544668
[ [ [ "# The Python ecosystem - The pandas library", "_____no_output_____" ], [ "The [pandas library](https://pandas.pydata.org/) was created by [Wes McKinney](http://wesmckinney.com/) in 2010. pandas provides **data structures** and **functions** \nfor manipulating, processing, cleaning and crunching data. In the Python ecosystem pandas is the state-of-the-art tool for working with tabular or spreadsheet-like data in which each column may be a different type (`string`, `numeric`, `date`, or otherwise). pandas provides sophisticated indexing functionality to make it easy to reshape, slice and dice, perform aggregations, and select subsets of data. pandas relies on other packages, such as [NumPy](http://www.numpy.org/) and [SciPy](https://scipy.org/scipylib/index.html). \nFurther pandas integrates [matplotlib](https://matplotlib.org/) for plotting. \n\nIf you are new to pandas we strongly recommend to visit the very well written [__pandas tutorials__](https://pandas.pydata.org/pandas-docs/stable/tutorials.html), which cover all relevant sections for new users to properly get started.\n\n\nOnce installed (for details refer to the [documentation](https://pandas.pydata.org/pandas-docs/stable/install.html)), pandas is imported by using the canonical alias `pd`.", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ] ], [ [ "The pandas library has two workhorse data structures: __*Series*__ and __*DataFrame*__.\n\n* one dimensional `pd.Series` object\n* two dimensional `pd.DataFrame` object", "_____no_output_____" ], [ "***\n\n## The `pd.Series` object", "_____no_output_____" ], [ "Data generation", "_____no_output_____" ] ], [ [ "# import the random module from numpy\nfrom numpy import random \n# set seed for reproducibility\nrandom.seed(123) \n# generate 26 random integers between -10 and 10\nmy_data = random.randint(low=-10, high=10, size=26)\n# print the data\nmy_data", "_____no_output_____" ] ], [ [ "A Series is a one-dimensional array-like object containing an array of data and an associated array of data labels, called its _index_. We create a `pd.Series` object by calling the `pd.Series()` function. ", "_____no_output_____" ] ], [ [ "# Uncomment to look up the documentation\n# ?pd.Series # docstring\n# ??pd.Series # source", "_____no_output_____" ], [ "# create a pd.Series object\ns = pd.Series(data=my_data)\ns", "_____no_output_____" ], [ "type(s)", "_____no_output_____" ] ], [ [ "***\n\n### `pd.Series` attributes\n\nPython objects in general and the `pd.Series` in particular offer useful object-specific *attributes*.\n\n* _attribute_ $\\to$ `OBJECT.attribute` $\\qquad$ _Note that the attribute is called without parenthesis_", "_____no_output_____" ] ], [ [ "s.dtypes", "_____no_output_____" ], [ "s.index", "_____no_output_____" ] ], [ [ "We can use the `index` attribute to assign an index to a `pd.Series` object.\n\nConsider the letters of the alphabet....", "_____no_output_____" ] ], [ [ "import string\nletters = string.ascii_uppercase\nletters", "_____no_output_____" ] ], [ [ "By providing an array-type object we assign a new index to the `pd.Series` object.", "_____no_output_____" ] ], [ [ "s.index = [l for l in letters]", "_____no_output_____" ], [ "s.index", "_____no_output_____" ], [ "s", "_____no_output_____" ] ], [ [ "***\n### `pd.Series` methods\n\nMethods are functions that are called using the attribute notation. 
Hence they are called by appending a dot (`.`) to the Python object, followed by the name of the method, parentheses `()` and, if needed, one or more arguments (`arg`). \n\n* _method_ $\\to$ `OBJECT.method_name(arg1, arg2, ...)`", "_____no_output_____" ] ], [ [ "s.sum()", "_____no_output_____" ], [ "s.mean()", "_____no_output_____" ], [ "s.max()", "_____no_output_____" ], [ "s.min()", "_____no_output_____" ], [ "s.median()", "_____no_output_____" ], [ "s.quantile(q=0.5)", "_____no_output_____" ], [ "s.quantile(q=[0.25, 0.5, 0.75])", "_____no_output_____" ] ], [ [ "***\n### Element-wise arithmetic\n\n\nA very useful feature of `pd.Series` objects is that we may apply arithmetic operations *element-wise*.", "_____no_output_____" ] ], [ [ "s*0.1\n#s+10\n#10/s\n#s**2\n#(2+s)*1**3", "_____no_output_____" ] ], [ [ "***\n### Selection and Indexing\n\nAnother main data operation is indexing and selecting particular subsets of the data object. pandas comes with a very [rich set of methods](https://pandas.pydata.org/pandas-docs/stable/indexing.html) for these types of tasks. \n\nIn its simplest form we index a Series NumPy-like, using the `[]` operator to select a particular `index` of the Series.", "_____no_output_____" ] ], [ [ "s[3]", "_____no_output_____" ], [ "s[2:6]", "_____no_output_____" ], [ "s[\"C\"]", "_____no_output_____" ], [ "s[\"C\":\"K\"]", "_____no_output_____" ] ], [ [ "***\n\n## The `pd.DataFrame` object\n\nThe primary pandas data structure is the `DataFrame`. It is a two-dimensional, size-mutable, potentially heterogeneous tabular data structure with both row and column labels. Arithmetic operations align on both row and column labels. Basically, the `DataFrame` can be thought of as a `dictionary`-like container for Series objects. \n\n\n", "_____no_output_____" ], [ "**Generate a `DataFrame` object from scratch** \n\npandas facilitates the import of many different data types and sources; however, for the sake of this tutorial we generate a `DataFrame` object from scratch. 
\n\nSource: http://duelingdata.blogspot.de/2016/01/the-beatles.html", "_____no_output_____" ] ], [ [ "df = pd.DataFrame({\"id\" : range(1,5),\n \"Name\" : [\"John\", \"Paul\", \"George\", \"Ringo\"],\n \"Last Name\" : [\"Lennon\", \"McCartney\", \"Harrison\", \"Star\"],\n \"dead\" : [True, False, True, False],\n \"year_born\" : [1940, 1942, 1943, 1940],\n \"no_of_songs\" : [62, 58, 24, 3]\n })\ndf", "_____no_output_____" ] ], [ [ "***\n### `pd.DataFrame` attributes", "_____no_output_____" ] ], [ [ "df.dtypes", "_____no_output_____" ], [ "# axis 0\ndf.columns", "_____no_output_____" ], [ "# axis 1\ndf.index", "_____no_output_____" ] ], [ [ "***\n### `pd.DataFrame` methods\n", "_____no_output_____" ], [ "**Get a quick overview of the data set**", "_____no_output_____" ] ], [ [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 4 entries, 0 to 3\nData columns (total 6 columns):\nLast Name 4 non-null object\nName 4 non-null object\ndead 4 non-null bool\nid 4 non-null int64\nno_of_songs 4 non-null int64\nyear_born 4 non-null int64\ndtypes: bool(1), int64(3), object(2)\nmemory usage: 244.0+ bytes\n" ], [ "df.describe()", "_____no_output_____" ], [ "df.describe(include=\"all\")", "_____no_output_____" ] ], [ [ "**Change index to the variable `id`**", "_____no_output_____" ] ], [ [ "df", "_____no_output_____" ], [ "df.set_index(\"id\")", "_____no_output_____" ], [ "df", "_____no_output_____" ] ], [ [ "Note that nothing changed!!\n\nFor the purpose of memory and computation efficiency `pandas` returns a view of the object, rather than a copy. Hence, if we want to make a permanent change we have to assign/reassign the object to a variable:\n\n df = df.set_index(\"id\") \n \nor, some methods have the `inplace=True` argument:\n\n df.set_index(\"id\", inplace=True) ", "_____no_output_____" ] ], [ [ "df = df.set_index(\"id\")", "_____no_output_____" ], [ "df", "_____no_output_____" ] ], [ [ "**Arithmetic methods**", "_____no_output_____" ] ], [ [ "df.sum()", "_____no_output_____" ], [ "df.sum(axis=1)", "_____no_output_____" ] ], [ [ "#### `groupby` method\n[Hadley Wickham 2011: The Split-Apply-Combine Strategy for Data Analysis, Journal of Statistical Software, 40(1)](https://www.jstatsoft.org/article/view/v040i01)", "_____no_output_____" ], [ "<img src=\"./_img/split-apply-combine.svg\" width=\"600\">\n\nImage source: [Jake VanderPlas 2016, Data Science Handbook](https://jakevdp.github.io/PythonDataScienceHandbook/)", "_____no_output_____" ] ], [ [ "df", "_____no_output_____" ], [ "df.groupby(\"dead\")", "_____no_output_____" ], [ "df.groupby(\"dead\").sum()", "_____no_output_____" ], [ "df.groupby(\"dead\")[\"no_of_songs\"].sum()", "_____no_output_____" ], [ "df.groupby(\"dead\")[\"no_of_songs\"].mean()", "_____no_output_____" ], [ "df.groupby(\"dead\")[\"no_of_songs\"].agg([\"mean\", \"max\", \"min\"])", "_____no_output_____" ] ], [ [ "#### Family of `apply`/`map` methods\n\n* `apply` works on a row (`axis=0`, default) / column (`axis=1`) basis of a `DataFrame`\n* `applymap` works __element-wise__ on a `DataFrame`\n* `map` works __element-wise__ on a `Series`.\n", "_____no_output_____" ] ], [ [ "df", "_____no_output_____" ], [ "# (axis=0, default)\ndf[[\"Last Name\", \"Name\"]].apply(lambda x: x.sum())", "_____no_output_____" ], [ "# (axis=1)\ndf[[\"Last Name\", \"Name\"]].apply(lambda x: x.sum(), axis=1)", "_____no_output_____" ] ], [ [ "_... 
maybe a more useful case..._", "_____no_output_____" ] ], [ [ "df.apply(lambda x: \" \".join(x[[\"Name\", \"Last Name\"]]), axis=1)", "_____no_output_____" ] ], [ [ "***\n### Selection and Indexing", "_____no_output_____" ], [ "**Column index**", "_____no_output_____" ] ], [ [ "df[\"Name\"]", "_____no_output_____" ], [ "df[[\"Name\", \"Last Name\"]]", "_____no_output_____" ], [ "df.dead", "_____no_output_____" ] ], [ [ "**Row index**\n\nIn addition to the `[]` operator pandas ships with other indexing operators such as `.loc[]` and `.iloc[]`, among others.\n\n* `.loc[]` is primarily __label based__, but may also be used with a boolean array.\n* `iloc[]` is primarily __integer position based__ (from 0 to length-1 of the axis), but may also be used with a boolean array. \n", "_____no_output_____" ] ], [ [ "df.head(2)", "_____no_output_____" ], [ "df.loc[1]", "_____no_output_____" ], [ "df.iloc[1]", "_____no_output_____" ] ], [ [ "**Row and Columns indices**\n\n`df.loc[row, col]`", "_____no_output_____" ] ], [ [ "df.loc[1, \"Last Name\"]", "_____no_output_____" ], [ "df.loc[2:4, [\"Name\", \"dead\"]]", "_____no_output_____" ] ], [ [ "**Logical indexing**", "_____no_output_____" ] ], [ [ "df", "_____no_output_____" ], [ "df.no_of_songs > 50", "_____no_output_____" ], [ "df.loc[df.no_of_songs > 50]", "_____no_output_____" ], [ "df.loc[(df.no_of_songs > 50) & (df.year_born >= 1942)]", "_____no_output_____" ], [ "df.loc[(df.no_of_songs > 50) & (df.year_born >= 1942), [\"Last Name\", \"Name\"]]", "_____no_output_____" ] ], [ [ "***\n\n### Manipulating columns, rows and particular entries", "_____no_output_____" ], [ "**Add a row to the data set**", "_____no_output_____" ] ], [ [ "from numpy import nan\ndf.loc[5] = [\"Mouse\", \"Mickey\", nan, nan, 1928]\ndf", "_____no_output_____" ], [ "df.dtypes", "_____no_output_____" ] ], [ [ "_Note that the variable `dead` changed. Its values changed from `True`/`False` to `1.0`/`0.0`. Consequently its `dtype` changed from `bool` to `float64`._", "_____no_output_____" ], [ "**Add a column to the data set**", "_____no_output_____" ] ], [ [ "pd.datetime.today()", "_____no_output_____" ], [ "now = pd.datetime.today().year\nnow", "_____no_output_____" ], [ "df[\"age\"] = now - df.year_born\ndf", "_____no_output_____" ] ], [ [ "**Change a particular entry**", "_____no_output_____" ] ], [ [ "df.loc[5, \"Name\"] = \"Mini\" ", "_____no_output_____" ], [ "df", "_____no_output_____" ] ], [ [ "***\n## Plotting\n\nThe plotting functionality in pandas is built on top of matplotlib. It is quite convenient to start the visualization process with basic pandas plotting and to switch to matplotlib to customize the pandas visualization.", "_____no_output_____" ], [ "### `plot` method", "_____no_output_____" ] ], [ [ "# this call causes the figures to be plotted below the code cells\n% matplotlib inline", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df[[\"no_of_songs\", \"age\"]].plot()", "_____no_output_____" ], [ "df[\"dead\"].plot.hist()", "_____no_output_____" ], [ "df[\"age\"].plot.bar()", "_____no_output_____" ] ], [ [ "## ...some notes on plotting with Python\n\n\nPlotting is an essential component of data analysis. However, the Python visualization world can be a frustrating place. There are many different options and choosing the right one is a challenge. 
(If you dare take a look at the [Python Visualization Landscape](https://github.com/rougier/python-visualization-landscape).)\n\n\n[matplotlib](https://matplotlib.org/) is probably the best-known 2D plotting Python library. It can produce publication-quality figures in a variety of formats and interactive environments across platforms. However, matplotlib can be a source of frustration due to its complex syntax and the existence of two interfaces, a __MATLAB-like state-based interface__ and an __object-oriented interface__. Hence, __there is always more than one way to build a visualization__. Another source of confusion is that matplotlib is well integrated into other Python libraries, such as [pandas](http://pandas.pydata.org/index.html), [seaborn](http://seaborn.pydata.org/index.html), [xarray](http://xarray.pydata.org/en/stable/), among others. Hence, there is confusion as to when to use pure matplotlib or a tool that is built on top of matplotlib.", "_____no_output_____" ], [ "We import the `matplotlib` library and matplotlib's `pyplot` module using the canonical commands\n\n import matplotlib as mpl\n import matplotlib.pyplot as plt\n\nWith respect to matplotlib terminology, it is important to understand that the __`Figure`__ is the final image that may contain one or more axes, and that the __`Axes`__ represents an individual plot.\n\nTo create a `Figure` object we call\n\n fig = plt.figure()\n\nHowever, a more convenient way to create a `Figure` object and an `Axes` object at once is to call\n\n fig, ax = plt.subplots() \n\nThen we can use the `Axes` object to add data for plotting. ", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\n# create a Figure and Axes object\nfig, ax = plt.subplots(figsize=(10,5)) \n\n# plot the data and reference the Axes object\ndf[\"age\"].plot.bar(ax=ax)\n\n# add some customization to the Axes object\nax.set_xticklabels(df.Name, rotation=0)\nax.set_xlabel(\"\")\nax.set_ylabel(\"Age\", size=14)\nax.set_title(\"The Beatles and ... something else\", size=18);", "_____no_output_____" ] ], [ [ "Note that we are only scratching the surface of the plotting capabilities with pandas. Refer to the pandas online documentation ([here](https://pandas.pydata.org/pandas-docs/stable/visualization.html)) for a comprehensive overview.\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
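The pandas tutorial serialized in the record above centers on the split-apply-combine (`groupby`) pattern. A minimal, self-contained sketch of that pattern follows; the column names and values here are made-up toy data, not part of the record itself.

```python
import pandas as pd

# toy data in the spirit of the tutorial's example (values are illustrative only)
df = pd.DataFrame({
    "name": ["John", "Paul", "George", "Ringo"],
    "dead": [True, False, True, False],
    "no_of_songs": [62, 58, 24, 3],
})

# split on the grouping key, apply aggregations, combine into one summary table
summary = df.groupby("dead")["no_of_songs"].agg(["mean", "max", "min"])
print(summary)
```

The same `groupby` object also accepts custom functions through `agg` or `apply` when the built-in aggregations are not enough.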
c530666f9afc54118d24106cc611d215cfe4f1ac
10,486
ipynb
Jupyter Notebook
Code/results_ad_hoc.ipynb
tatonetti-lab/sex_risks
caa7159993921aa2861cae78a56c3e72052b4ea6
[ "MIT" ]
3
2019-08-18T07:39:43.000Z
2021-06-05T13:45:44.000Z
Code/results_ad_hoc.ipynb
tatonetti-lab/sex_risks
caa7159993921aa2861cae78a56c3e72052b4ea6
[ "MIT" ]
null
null
null
Code/results_ad_hoc.ipynb
tatonetti-lab/sex_risks
caa7159993921aa2861cae78a56c3e72052b4ea6
[ "MIT" ]
3
2018-11-06T02:58:23.000Z
2020-10-20T15:36:19.000Z
32.565217
175
0.489224
[ [ [ "# Run AwareDX ad-hoc on any drug and adverse event", "_____no_output_____" ] ], [ [ "from os import path\nfrom collections import Counter, defaultdict\nfrom tqdm.notebook import tqdm\nimport numpy as np\nimport pandas as pd \nimport feather \nimport scipy.stats\nfrom scipy import stats\nimport pymysql\nimport pymysql.cursors\nfrom database import Database\nfrom utils import Utils\nfrom drug import Drug\n\nu = Utils()\ndb = Database('Mimir from Munnin')\nnp.random.seed(u.RANDOM_STATE)", "_____no_output_____" ], [ "def compile(results): \n \n results = results.dropna()\n results = results.reset_index()\n num_tests = results.shape[0]\n results.loc[:,'bonf_p_value'] = results.get('p_value') * num_tests\n #results = results.query('bonf_p_value<1')\n \n drug_adr_pairs = results.get(['drug','itr','adr']).groupby(by=['drug','adr']).count().query('itr==25').reset_index().get(['drug', 'adr'])\n\n scores = pd.DataFrame(columns=['drug', 'adr', 'p_val_min', 'p_val_med', 'p_val_max', 'logROR_avg','logROR_ci95_low', 'logROR_ci95_upp']).set_index(['drug','adr'])\n\n def mean_confidence_interval(data, confidence=0.95):\n a = 1.0 * np.array(data)\n n = len(a)\n m, se = np.mean(a), scipy.stats.sem(a)\n h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)\n return m, m-h, m+h\n\n for _, (drug, adr) in tqdm(drug_adr_pairs.iterrows(), total=drug_adr_pairs.shape[0]):\n data = results.query('drug==@drug and adr==@adr')\n\n bonf_p = data['bonf_p_value'].values \n scores.at[(drug, adr), 'p_val_min'] = np.min(bonf_p)\n scores.at[(drug, adr), 'p_val_med'] = np.median(bonf_p)\n scores.at[(drug, adr), 'p_val_max'] = np.max(bonf_p)\n\n logROR = data['logROR'].values \n mean, lower, upper = mean_confidence_interval(logROR)\n scores.at[(drug, adr), 'logROR_avg'] = mean\n scores.at[(drug, adr), 'logROR_ci95_low'] = lower\n scores.at[(drug, adr), 'logROR_ci95_upp'] = upper\n\n scores = scores.reset_index()\n\n name_atc4, name_atc5, name_hlgt, name_soc, name_pt = defaultdict(str), defaultdict(str), defaultdict(str), defaultdict(str), defaultdict(str)\n\n for id_, name in db.run('select * from atc_4_name'): \n name_atc4[str(id_)] = name\n\n for id_, name in db.run('select * from atc_5_name'): \n name_atc5[str(id_)] = name\n\n for id_, name in db.run('select * from hlgt_name'): \n name_hlgt[id_] = name\n\n for id_, name in db.run('select * from soc_name'): \n name_soc[id_] = name\n\n for id_, name in db.run('select * from pt_name'): \n name_pt[id_] = name\n\n\n scores['drug_name'] = ''\n scores['drug_class'] = 0\n scores = scores.set_index('drug')\n for id_ in np.unique(scores.index): \n if name_atc4[id_]: \n scores.at[id_, 'drug_name'] = name_atc4[id_]\n scores.at[id_, 'drug_class'] = 4\n else:\n scores.at[id_, 'drug_name'] = name_atc5[id_]\n scores.at[id_, 'drug_class'] = 5\n scores = scores.reset_index()\n\n scores['adr_name'] = ''\n scores['adr_class'] = ''\n scores = scores.set_index('adr')\n for id_ in np.unique(scores.index): \n if name_soc[id_]: \n scores.at[id_, 'adr_name'] = name_soc[id_]\n scores.at[id_, 'adr_class'] = 'soc'\n elif name_hlgt[id_]: \n scores.at[id_, 'adr_name'] = name_hlgt[id_]\n scores.at[id_, 'adr_class'] = 'hlgt'\n elif name_pt[id_]: \n scores.at[id_, 'adr_name'] = name_pt[id_]\n scores.at[id_, 'adr_class'] = 'pt'\n scores = scores.reset_index()\n \n return scores", "_____no_output_____" ], [ "drug_name = input(' Enter ATC drug name: ')\nq_atc5 = \"select atc_5_id from atc_5_name where atc_5_name=\\'\"+drug_name+\"\\'\"\nq_atc4 = \"select atc_4_id from atc_4_name where 
atc_4_name=\\'\"+drug_name+\"\\'\"\ntry:\n if db.get_list(q_atc5): \n drugID = db.get_list(q_atc5)[0]\n else: \n drugID = db.get_list(q_atc4)[0]\nexcept:\n raise NameError(\"drug not found\")\nif not drugID: raise NameError(\"drug not found\")\n \n \nadr_name = input(' Enter MedDRA outcome name: ')\nq = \"select meddra_concept_id from pt_name where meddra_concept_name=\\'\"+adr_name+\"\\'\"\ntry: \n adrID = db.get_list(q)\nexcept: raise NameError(\"adr not found\")\nif not adrID: raise NameError(\"adr not found\")\n \nfilename = 'Ad_Hoc/'+str(drugID)+'_'+str(adrID)\nprint(\"Checking for {}\".format(filename))\n\nif path.exists(u.DATA_PATH+filename+'.feather'): \n results = u.load_df(filename)\n print(\"Found!\")\nelse: \n print(\"Not found, running ad-hoc\")\n iterations=25\n drug = Drug(drugID, adrID)\n\n for itr in tqdm(range(1, iterations+1)): \n drug.match()\n drug.count_adr()\n drug.assign_abcd(itr)\n drug.do_chi_square()\n drug.calc_logROR()\n drug.reset_for_next_itr()\n\n assert drug.ensure_results(itr)\n\n results = compile(drug.results)\n u.save_df(results, filename)\n \nu.print_table(results)\nresults", " Enter ATC drug name: Fentanyl\n Enter MedDRA outcome name: Cardiac arrest\nChecking for Ad_Hoc/21604201_[35204966]\nFound!\n\nM \t 0.27 (0.26, 0.28) \t fentanyl \t pt Cardiac arrest\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ] ]
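The ad-hoc notebook in the record above applies a chi-square test and a log reporting odds ratio (logROR) to drug and adverse-event report counts. As a rough, stand-alone sketch of that kind of 2x2 disproportionality calculation (not the notebook's exact procedure, and with entirely hypothetical counts a, b, c, d):

```python
import numpy as np
from scipy.stats import chi2_contingency

# hypothetical 2x2 report counts:
# rows = drug exposed / not exposed, cols = ADR reported / not reported
a, b, c, d = 20, 180, 35, 765
table = np.array([[a, b], [c, d]])

chi2, p_value, dof, expected = chi2_contingency(table)
log_ror = np.log((a / b) / (c / d))  # log reporting odds ratio for the pair
print(chi2, p_value, log_ror)
```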
c53066841c396f45b7dd86dac4141c4ff3a49d3e
237,245
ipynb
Jupyter Notebook
Python/clases/3_algebra_lineal/2_interpolacion.ipynb
CarlosJChV/Propedeutico
d903192ffa64a7576faace68c2256e69bc11087c
[ "Apache-2.0" ]
null
null
null
Python/clases/3_algebra_lineal/2_interpolacion.ipynb
CarlosJChV/Propedeutico
d903192ffa64a7576faace68c2256e69bc11087c
[ "Apache-2.0" ]
null
null
null
Python/clases/3_algebra_lineal/2_interpolacion.ipynb
CarlosJChV/Propedeutico
d903192ffa64a7576faace68c2256e69bc11087c
[ "Apache-2.0" ]
1
2020-09-16T14:38:59.000Z
2020-09-16T14:38:59.000Z
93.477147
48,311
0.823444
[ [ [ "**Notas para contenedor de docker:**", "_____no_output_____" ], [ "Comando de docker para ejecución de la nota de forma local:\n\nnota: cambiar `dir_montar` por la ruta de directorio que se desea mapear a `/datos` dentro del contenedor de docker.\n\n```\ndir_montar=<ruta completa de mi máquina a mi directorio>#aquí colocar la ruta al directorio a montar, por ejemplo: \n#dir_montar=/Users/erick/midirectorio.\n```\n\nEjecutar:\n\n```\n$docker run --rm -v $dir_montar:/datos --name jupyterlab_prope_r_kernel_tidyverse -p 8888:8888 -d palmoreck/jupyterlab_prope_r_kernel_tidyverse:2.1.4 \n\n```", "_____no_output_____" ], [ "Ir a `localhost:8888` y escribir el password para jupyterlab: `qwerty`\n\nDetener el contenedor de docker:\n\n```\ndocker stop jupyterlab_prope_r_kernel_tidyverse\n```\n", "_____no_output_____" ], [ "Documentación de la imagen de docker `palmoreck/jupyterlab_prope_r_kernel_tidyverse:2.1.4` en [liga](https://github.com/palmoreck/dockerfiles/tree/master/jupyterlab/prope_r_kernel_tidyverse).", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "Para ejecución de la nota usar:\n\n[docker](https://www.docker.com/) (instalación de forma **local** con [Get docker](https://docs.docker.com/install/)) y ejecutar comandos que están al inicio de la nota de forma **local**. \n", "_____no_output_____" ], [ "[![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/palmoreck/dockerfiles-for-binder/jupyterlab_prope_r_kernel_tidyerse?urlpath=lab/tree/Propedeutico/Python/clases/3_algebra_lineal/2_interpolacion.ipynb) esta opción crea una máquina individual en un servidor de Google, clona el repositorio y permite la ejecución de los notebooks de jupyter.", "_____no_output_____" ], [ "[![Run on Repl.it](https://repl.it/badge/github/palmoreck/dummy)](https://repl.it/languages/python3) Esta opción no clona el repositorio, no ejecuta los notebooks de jupyter pero permite ejecución de instrucciones de Python de forma colaborativa con [repl.it](https://repl.it/). Al dar click se crearán nuevos ***repl*** debajo de sus users de ***repl.it***.\n", "_____no_output_____" ], [ "**Nota importante: Para esta nota hay que usar el jupyter notebook clásico. Si están en jupyterlab deben dar click en la tab de *Help* y ahí está la opción de usar el *notebook* clásico. 
También asegúrense que sólo estén usando de forma local el notebook clásico de jupyter y no al mismo tiempo con el jupyterlab.**", "_____no_output_____" ], [ "<img src=\"https://dl.dropboxusercontent.com/s/41fjwmyxzk5ocgn/launch_classic_jupyter_notebook.png?dl=0\" heigth=\"300\" width=\"300\">", "_____no_output_____" ], [ "**Se utiliza la versión clásica pues se usará el comando de magic `%matplotlib notebook`**", "_____no_output_____" ], [ "# Interpolación", "_____no_output_____" ], [ "Dados $n+1$ puntos $x_0,x_1,\\dots,x_n$ el objetivo es construir una función $f(x)$ tal que $f(x_i) = y_i$ con $y_i$ conocido $\\forall i=0,1,\\dots,n$.", "_____no_output_____" ], [ "<img src=\"https://dl.dropboxusercontent.com/s/m0gks881yffz85f/interpolacion.jpg?dl=0\" heigth=\"300\" width=\"300\">", "_____no_output_____" ], [ "Entre las aplicaciones en interpolación se encuentran:\n\n* Reconstrucción de funciones.\n* Aproximación a derivadas e integrales.\n* Estimación de funciones en cantidades no conocidas.", "_____no_output_____" ], [ "## Modelo en interpolación", "_____no_output_____" ], [ "Típicamente el modelo $f$ es de la forma $f(x|w) = \\displaystyle \\sum_{j=0}^nw_j \\phi_j(x)$ con $\\phi_j:\\mathbb{R} \\rightarrow \\mathbb{R}$ funciones conocidas y $w_j$ parámetros desconocidos por determinar $\\forall j=0,1,\\dots,n$.", "_____no_output_____" ], [ "**Obs:**\n\n* Comúnmente las $\\phi_j$'s son funciones polinomiales, trigonométricas, racionales y exponenciales.\n* La notación $f(x|w)$ se utiliza para denotar que $w$ es un vector de parámetros a estimar.", "_____no_output_____" ], [ "## ¿Cómo ajustar el modelo anterior?", "_____no_output_____" ], [ "El problema de interpolación conduce a plantear y posteriormente resolver un sistema de ecuaciones lineales de la forma $Aw = y$ pues la condición de interpolación es: $f(x_i|w_i) = y_i$, $\\forall i=0,1,\\dots,n$ con $A \\in \\mathbb{R}^{{n+1}x{n+1}}$, $w,y \\in \\mathbb{R}^{n+1}$ definidas como sigue:", "_____no_output_____" ], [ "$$A = \\left[\\begin{array}{cccc}\n\\phi_0(x_0) &\\phi_1(x_0)&\\dots&\\phi_n(x_0)\\\\\n\\phi_0(x_1) &\\phi_1(x_1)&\\dots&\\phi_n(x_1)\\\\\n\\vdots &\\vdots& \\vdots&\\vdots\\\\\n\\phi_0(x_n) &\\phi_1(x_n)&\\dots&\\phi_n(x_n)\n\\end{array}\n\\right],\nw=\n\\left[\\begin{array}{c}\nw_0\\\\\nw_1\\\\\n\\vdots \\\\\nw_n\n\\end{array}\n\\right] ,\ny=\n\\left[\\begin{array}{c}\ny_0\\\\\ny_1\\\\\n\\vdots \\\\\ny_n\n\\end{array}\n\\right]\n$$", "_____no_output_____" ], [ "Esto es, hay que resolver: $$\\begin{array}{ccc} \\phi_0(x_0)w_0 + \\phi_1(x_0)w_1 + \\cdots + \\phi_n(x_0)w_n &= & y_0 \\\\ \\phi_0(x_1)w_0 + \\phi_1(x_1)w_1 + \\cdots + \\phi_n(x_1)w_n &= & y_1\\\\ \\vdots & & \\\\ \\phi_0(x_n)w_0 + \\phi_1(x_n)w_1 + \\cdots + \\phi_n(x_n)w_n &= & y_n \\end{array}$$ ", "_____no_output_____" ], [ "que es la condición de interpolación $f(x_i|w) = y_i \\forall i=0,1,\\dots,n$ bajo el modelo: $f(x|w) = \\displaystyle \\sum_{j=0}^nw_j \\phi_j(x)$ en notación **matricial**.", "_____no_output_____" ], [ "## Interpolación polinomial: funciones $\\phi_j$'s son polinomios", "_____no_output_____" ], [ "**En numpy ...**", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pprint", "_____no_output_____" ] ], [ [ "Supongamos que queremos realizar la interpolación a los siguientes puntos:", "_____no_output_____" ] ], [ [ "#pseudorandom array\nnp.random.seed(2000) #for reproducibility\nnpoints = 6\nx = np.random.randn(npoints) + 10\ny = np.random.randn(npoints) - 10\n", "_____no_output_____" ], [ 
"pprint.pprint('x:')\npprint.pprint(x)\npprint.pprint('y:')\npprint.pprint(y)", "'x:'\narray([11.73673761, 11.89791391, 7.89322658, 9.85108791, 10.58306155,\n 7.74076697])\n'y:'\narray([ -9.86276046, -10.70121322, -10.62078008, -10.47961976,\n -8.79026123, -11.07518386])\n" ] ], [ [ "ver: [numpy.random.randn](https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.randn.html#numpy.random.randn)", "_____no_output_____" ], [ "**Los datos ejemplo**", "_____no_output_____" ] ], [ [ "plt.plot(x,y, 'r*')\nplt.xlabel('x')\nplt.ylabel('y')\nplt.title('Puntos ejemplo')\nplt.show()", "_____no_output_____" ] ], [ [ "Con numpy podemos usar la función `polyfit` en el paquete de `numpy` para realizar lo anterior: (ver [numpy.polyfit](https://docs.scipy.org/doc/numpy/reference/generated/numpy.polyfit.html))", "_____no_output_____" ], [ "El tercer argumento de polyfit especifica el grado del polinomio a ajustar. Como tenemos `npoints = 6` puntos, entonces debemos generar un polinomio de grado $5$", "_____no_output_____" ] ], [ [ "ndegree = npoints -1\ncoefficients = np.polyfit(x,y,ndegree)", "_____no_output_____" ] ], [ [ "Una vez realizado el llamado a la función `polyfit` se regresan los coeficientes de $x$ ordenados del mayor grado al menor.", "_____no_output_____" ] ], [ [ "np.set_printoptions(precision = 2) #sólo dos decimales que se muestren\npprint.pprint(coefficients)", "array([ 8.16e-02, -4.26e+00, 8.78e+01, -8.95e+02, 4.50e+03, -8.98e+03])\n" ] ], [ [ "Entonces nuestro polinomio es: $$p_{npoints}(x) = .0816x^5 -4.26x^4 +87.8x^3-895x^2+4500x-8980$$", "_____no_output_____" ], [ "**Nota: si queremos utilizar una representación con la matriz de [Vandermonde](https://en.wikipedia.org/wiki/Vandermonde_matrix) para el sistema de ecuaciones que se resolvió se tiene la siguiente representación matricial:**", "_____no_output_____" ], [ "$$\\left[\\begin{array}{ccccc}\n1 & x_0 & x_0^2 & x_0^3 & x_0^4 & x_0^5 \\\\\n1 & x_1 & x_1^2 & x_1^3 & x_1^4 & x_1^5\\\\\n\\vdots &\\vdots& \\vdots&\\vdots\\\\\n1 & x_5 & x_5^2 & x_5^3 & x_5^4 & x_5^5\n\\end{array}\n\\right]\n\\left[\\begin{array}{c}\n-8980\\\\\n4500\\\\\n\\vdots \\\\\n.0816\n\\end{array}\n\\right] =\n\\left[\\begin{array}{c}\ny_0\\\\\ny_1\\\\\n\\vdots \\\\\ny_5\n\\end{array}\n\\right]\n$$", "_____no_output_____" ], [ "**Obs: hay diferentes representaciones matriciales para el problema de interpolación, por ejemplo representación por [Newton](https://en.wikipedia.org/wiki/Newton_polynomial) o por [Lagrange](https://en.wikipedia.org/wiki/Lagrange_polynomial). Cualquiera de las representaciones que se utilicen obtienen el mismo interpolador, la diferencia consiste en propiedades que tienen las matrices de cada representación (la matriz de Vandermonde para un grado alto conduce a tener sistemas de ecuaciones lineales muy sensibles a perturbaciones en los datos).**", "_____no_output_____" ], [ "**La gráfica**", "_____no_output_____" ], [ "Ahora nos gustaría graficarlo en el intervalo `[min(x),max(x)]` con `min(x)` la entrada con valor mínimo del numpy array `x` y `max(x)` su entrada con valor máximo.", "_____no_output_____" ], [ "Para lo anterior debemos evaluar $p_{npoints}(x)$ en diferentes valores de $x$. 
Para esto, generamos un numpy array con un número de puntos `neval`:", "_____no_output_____" ] ], [ [ "neval = 100\nxeval = np.linspace(min(x),max(x), neval)\nyeval = np.polyval(coefficients,xeval)", "_____no_output_____" ], [ "print('xeval.shape:', xeval.shape[0])\nprint('yeval.shape:', yeval.shape[0])", "xeval.shape: 100\nyeval.shape: 100\n" ], [ "plt.plot(x, y, 'r*', xeval, yeval, 'k-')\nplt.legend(['datos','interpolador'], loc='best')\nplt.show()", "_____no_output_____" ], [ "max(yeval)", "_____no_output_____" ] ], [ [ "Si tuviéramos que estimar cantidades negativas con nuestro interpolador, entonces la siguiente estimación calcularíamos:", "_____no_output_____" ] ], [ [ "np.polyval(coefficients, 8.5)", "_____no_output_____" ] ], [ [ "### Problema con: número de puntos y la interpolación polinomial", "_____no_output_____" ], [ "Si incrementamos a 9 puntos por los que deseamos hacer pasar un interpolador tenemos:", "_____no_output_____" ] ], [ [ "#pseudorandom array\nnp.random.seed(2000) #for reproducibility\nnpoints = 9\nx = np.random.randn(npoints) + 10\ny = np.random.randn(npoints) - 10", "_____no_output_____" ], [ "pprint.pprint('x:')\npprint.pprint(x)\npprint.pprint('y:')\npprint.pprint(y)", "'x:'\narray([11.74, 11.9 , 7.89, 9.85, 10.58, 7.74, 10.14, 9.3 , 9.38])\n'y:'\narray([-10.48, -8.79, -11.08, -9.19, -10.29, -10.22, -10.17, -8.9 ,\n -9.92])\n" ] ], [ [ "**Los datos**", "_____no_output_____" ] ], [ [ "plt.plot(x,y, 'r*')\nplt.xlabel('x')\nplt.ylabel('y')\nplt.title('Puntos ejemplo')\nplt.show()", "_____no_output_____" ], [ "ndegree = npoints -1\nnew_coefficients = np.polyfit(x,y,ndegree)\npprint.pprint(new_coefficients)", "array([ 2.55e+00, -2.01e+02, 6.94e+03, -1.36e+05, 1.66e+06, -1.30e+07,\n 6.31e+07, -1.75e+08, 2.11e+08])\n" ] ], [ [ "Nuestro polinomio ahora es (considerando dos dígitos a la derecha del punto decimal de los resultados anteriores): $$p_{npoints}(x) = 2.55x^8 -201x^7 + 6940x^6-1.36*10^5x^5+1.66*10^6x^4-1.3*10^7x^3 +6.31*10^7x^2-1.75*10^8x+2.11*10^8$$", "_____no_output_____" ], [ "**La gráfica**", "_____no_output_____" ] ], [ [ "neval = 100\nxeval = np.linspace(min(x),max(x), neval)\nyeval = np.polyval(new_coefficients,xeval)", "_____no_output_____" ], [ "print('xeval.shape:', xeval.shape[0])\nprint('yeval.shape:', yeval.shape[0])", "xeval.shape: 100\nyeval.shape: 100\n" ] ], [ [ "Obsérvese la oscilación que debe tener el polinomio de grado $9$ para pasar por los $10$ puntos:", "_____no_output_____" ] ], [ [ "plt.plot(x, y, 'r*',xeval, yeval, 'k-')\nplt.legend(['datos','interpolador'], loc='best')\nplt.show()", "_____no_output_____" ], [ "max(yeval)", "_____no_output_____" ] ], [ [ "Este tipo de oscilación es típica al tener un polinomio mayor o igual a $6$ (más de $7$ puntos). Si tuviéramos que estimar cantidades negativas con nuestro interpolador, entonces la siguiente estimación sería errórena:", "_____no_output_____" ] ], [ [ "np.polyval(new_coefficients,8.5)", "_____no_output_____" ] ], [ [ "lo cual es erróneo.", "_____no_output_____" ], [ "**Nota**", "_____no_output_____" ], [ "Los interpoladores obtenidos con alguno de los métodos anteriores se utilizan para estimar cantidades en el intervalo con el que fueron construídos. 
Si deseamos estimar fuera del intervalo debe de realizarse con cuidado pues se pueden tener estimaciones incorrectas.", "_____no_output_____" ] ], [ [ "np.polyval(coefficients, 15)", "_____no_output_____" ], [ "np.polyval(new_coefficients, 15)", "_____no_output_____" ] ], [ [ "### Polinomios piecewise", "_____no_output_____" ], [ "Para arreglar la oscilación de interpoladores de grado alto, una solución es interpolar con polinomios de grado bajo en cada subintervalo compuesto por las $x$'s, esto es, una forma *piecewise*. En python se realiza con el método `interpolate` del paquete `scipy`:", "_____no_output_____" ], [ "**Lineal**", "_____no_output_____" ] ], [ [ "from scipy.interpolate import interp1d", "_____no_output_____" ], [ "pw_l = interp1d(x, y) #linear piecewise ", "_____no_output_____" ], [ "neval = 100\nxeval = np.linspace(min(x),max(x), neval)\nyeval = pw_l(xeval)", "_____no_output_____" ], [ "print('xeval.shape:', xeval.shape[0])\nprint('yeval.shape:', yeval.shape[0])", "xeval.shape: 100\nyeval.shape: 100\n" ], [ "plt.plot(x, y, 'r*',xeval, yeval, 'k-')\nplt.legend(['datos','interpolador lineal piecewise'], loc='best')\nplt.show()", "_____no_output_____" ] ], [ [ "Aunque se ha resuelto la estimación:", "_____no_output_____" ] ], [ [ "print(pw_l(8.5))", "-10.1358074696758\n" ] ], [ [ "**Splines**", "_____no_output_____" ], [ "Los *splines* cúbicos *piecewise* resuelven la no diferenciabilidad del interpolador lineal en los puntos dados:", "_____no_output_____" ] ], [ [ "pw_spline = interp1d(x, y, kind = 'cubic') #spline piecewise \nneval = 100\nxeval = np.linspace(min(x),max(x), neval)\nyeval = pw_spline(xeval)", "_____no_output_____" ], [ "print('xeval.shape:', xeval.shape[0])\nprint('yeval.shape:', yeval.shape[0])", "xeval.shape: 100\nyeval.shape: 100\n" ], [ "plt.plot(x, y, 'r*',xeval, yeval, 'k-')\nplt.legend(['datos','cubic splines piecewise'], loc='best')\nplt.show()", "_____no_output_____" ], [ "print(pw_spline(8.5))", "-8.864195192656851\n" ] ], [ [ "Ver: [Interpolation (scipy.interpolate)](https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html)", "_____no_output_____" ], [ "**(Tarea)Ejercicio: Interpolar con 10 puntos generados de la función de [Runge](https://en.wikipedia.org/wiki/Runge%27s_phenomenon) $f(x) = \\frac{1}{1+25x^2}$ en el intervalo $[-1,1]$ equidistantes. Hacer la gráfica con $10,000$ puntos en el mismo intervalo. Utilizar polyfit para el polinomio interpolador y splines cúbicos.**", "_____no_output_____" ], [ "# Curvas paramétricas e interpolación", "_____no_output_____" ], [ "Ninguna de las técnicas vistas anteriormente pueden usarse **directamente** para generar curvas como la de una circunferencia:", "_____no_output_____" ] ], [ [ "radius = 1\nnpoints = 100", "_____no_output_____" ], [ "x = np.linspace(-radius,radius,npoints)\ny1 = np.sqrt(radius-x**2)\ny2 = -np.sqrt(radius-x**2)", "_____no_output_____" ], [ "plt.plot(x,y1,'m',\n x,y2,'m')\nplt.title(\"Circunferencia\")\nplt.show()", "_____no_output_____" ] ], [ [ "pues no puede expresarse como una función del tipo: $y = f(x)$. Obsérvese que para la gráfica anterior se han usado dos funciones: $y_1 = f_1(x) = \\sqrt{r-x^2}$, $y_2 = f_2(x) = -\\sqrt{r-x^2}$.", "_____no_output_____" ], [ "Lo anterior puede resolverse definiendo una función, $f: \\mathbb{R} \\rightarrow \\mathbb{R}^2$, de un parámetro $t$ que tome valores en el intervalo $[0,2\\pi)$ y definida por $f(t) = (\\cos(t), \\sin(t))$. 
Obsérvese que para $t=0$ se obtiene el punto $(1,0)$, para $t=\\frac{\\pi}{2}$ se obtiene $(0,1)$ y así sucesivamente hasta $t=2\\pi$ en el que obtendríamos nuevamente el punto $(1,0)$. Para este caso se cumple:\n\n$$f(t) = (x(t), y(t))$$\n\ncon $x(t) = \\cos(t)$, $y(t) = \\sin(t)$ funciones tales que $x : \\mathbb{R} \\rightarrow \\mathbb{R}$, $y: \\mathbb{R} \\rightarrow \\mathbb{R}$.", "_____no_output_____" ] ], [ [ "import time", "_____no_output_____" ], [ "npoints = 100\na = 0\nb = 2*np.pi\nt = np.linspace(a,b,npoints)\nx = np.cos(t)\ny = np.sin(t)\nx_min = np.min(x)\ny_min = np.min(y)\nx_max = np.max(x)\ny_max = np.max(y)", "_____no_output_____" ] ], [ [ "Ver [plt.draw](https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.draw.html#matplotlib-pyplot-draw)", "_____no_output_____" ] ], [ [ "def make_plot(ax, idx):\n ax.plot(x[:idx], y[:idx])\n window = 0.5\n plt.xlim(x_min-window, x_max+window)\n plt.ylim(y_min-window, y_max+window)\n plt.plot(x[:idx], y[:idx], 'mo')\n fig.canvas.draw() #redraw the current figure", "_____no_output_____" ] ], [ [ "Ver: [matplotlib magic command](https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-matplotlib), [plt.subplots](https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.pyplot.subplots.html#matplotlib-pyplot-subplots)", "_____no_output_____" ] ], [ [ "%matplotlib notebook\n#for interactive plotting\nfig, ax = plt.subplots() #create figure that will be used\n #in make_plot func. Also retrieve axes\nfor idx,_ in enumerate(t): #enumerate creates tuples \n #in a sequentially way\n make_plot(ax, idx)\n time.sleep(0.2)", "_____no_output_____" ] ], [ [ "**Nota: Hay que dar click en el botón arriba de la figura de apagar interactividad.**", "_____no_output_____" ], [ "## Ejemplo", "_____no_output_____" ], [ "**Nota importante: si están usando el botón de binder para ejecución de forma interactiva no utilicen el comando de `wget` para descargar su imagen, mejor utilicen la funcionalidad del jupyter notebook clásico para subir archivos:**", "_____no_output_____" ], [ "<img src=\"https://dl.dropboxusercontent.com/s/1v78rge4ehylmi2/upload_in_classic_jupyter_notebooks.png?dl=0\" heigth=\"900\" width=\"900\">", "_____no_output_____" ], [ "**Y asegúrense que están subiendo la imagen en la ruta `/Propedeutico/Python/clases/3_algebra_lineal`**", "_____no_output_____" ], [ "**No olviden dar click en `Upload` dos veces:**", "_____no_output_____" ], [ "<img src=\"https://dl.dropboxusercontent.com/s/oa1rnxf5ryxdigg/upload_in_classic_jupyter_notebooks_2.png?dl=0\" heigth=\"300\" width=\"300\">", "_____no_output_____" ], [ "Usemos la imagen siguiente para realizar una interpolación a una curva paramétrica con *splines*:", "_____no_output_____" ] ], [ [ "!wget https://www.dropbox.com/s/25zbthmsco6u1u6/hummingbird.png?dl=0 -O hummingbird.png", "_____no_output_____" ], [ "%%bash \nls ", "0_definiciones_generales.ipynb\n1_ecuaciones_lineales.ipynb\n2_interpolacion.ipynb\n2_interpolacion_parametrica_apoyo.ipynb\n3_minimos_cuadrados.ipynb\n4_SVD_y_reconstruccion_de_imagenes.ipynb\nKiara.png\nNota_apoyo_1_ecuaciones_lineales.ipynb\nNota_apoyo_3_minimos_cuadrados.ipynb\n__pycache__\ndata_for_nbook_3_minimos_cuadrados.txt\nhummingbird.png\nimg_bottle.png\nsolve_linear_system_of_equations.py\ntest_MSE.py\ntest_solve_linear_system_of_equations.py\nutils.py\n" ], [ "img=plt.imread('hummingbird.png')\nplt.imshow(img)\nplt.title('Colibrí')\nplt.show()", "_____no_output_____" ] ], [ [ "**Nota: Hay que dar click en el botón arriba de la figura de apagar 
interactividad.**", "_____no_output_____" ], [ "**De manera interactiva vamos dando click a la imagen anterior con la siguiente celda, en la lista `pos` se irán guardando las coordenadas en donde hagamos click.**", "_____no_output_____" ] ], [ [ "%matplotlib notebook\nfig, ax = plt.subplots()\npos = []\ndef onclick(event):\n pos.append([event.xdata,event.ydata])\nfig.canvas.mpl_connect('button_press_event', onclick)\nplt.title('Colibrí')\nplt.imshow(img)", "_____no_output_____" ], [ "pos", "_____no_output_____" ] ], [ [ "**Nota: una vez obtenida la lista `pos` dar click en el botón de apagado de interactividad.**", "_____no_output_____" ] ], [ [ "pos_array = np.array(pos)", "_____no_output_____" ], [ "x = pos_array[:,0]", "_____no_output_____" ] ], [ [ "Algunas entradas imprimimos de $x$:", "_____no_output_____" ] ], [ [ "x[0:10]", "_____no_output_____" ], [ "y = pos_array[:,1]", "_____no_output_____" ] ], [ [ "Algunas entradas imprimimos de $y$:", "_____no_output_____" ] ], [ [ "y[0:10]", "_____no_output_____" ] ], [ [ "Definamos nuestro parámetro $t$ en el intervalo $[0,1]$:", "_____no_output_____" ] ], [ [ "t = np.linspace(0,1, len(x))", "_____no_output_____" ], [ "t", "_____no_output_____" ] ], [ [ "Construyamos el spline para las curvas $x(t)$, $y(t)$ que nos definirán las coordenadas.", "_____no_output_____" ] ], [ [ "pw_spline_x = interp1d(t, x, kind = 'cubic') #spline piecewise \npw_spline_y = interp1d(t,y, kind = 'cubic') #spline piecewise ", "_____no_output_____" ] ], [ [ "Realicemos interpolación en $100$ puntos:", "_____no_output_____" ] ], [ [ "neval = 100\nteval = np.linspace(min(t),max(t), neval)", "_____no_output_____" ], [ "xeval = pw_spline_x(teval)\nyeval = pw_spline_y(teval)", "_____no_output_____" ], [ "print('xeval.shape:', xeval.shape[0])\nprint('yeval.shape:', yeval.shape[0])", "xeval.shape: 100\nyeval.shape: 100\n" ], [ "xeval[0:10]", "_____no_output_____" ], [ "yeval[0:10]", "_____no_output_____" ], [ "window_y = 50\nwindow_x = 500 \nx_min = np.min(x)\ny_min = np.min(y)\nx_max = np.max(x)\ny_max = np.max(y)", "_____no_output_____" ], [ "fig, ax = plt.subplots()\nax.plot(xeval,yeval)\nax.set_ylim(np.max(y)+window_y,np.min(y)-window_y)\nplt.xlim(np.min(x)-window_x,np.max(x)+window_x)\nplt.title('Colibrí con interpolación vía curva paramétrica')\nplt.show()", "_____no_output_____" ], [ "def make_plot(ax, idx):\n ax.plot(x[:idx], y[:idx])\n ax.set_ylim(y_max+window_y,y_min-window_y)\n plt.xlim(x_min-window_x,x_max+window_x)\n plt.plot(x[:idx], y[:idx], 'bo-')\n plt.title('Colibrí con interpolación vía curva paramétrica')\n fig.canvas.draw()", "_____no_output_____" ], [ "%matplotlib notebook\nfig, ax = plt.subplots()\nfor idx,_ in enumerate(t):\n make_plot(ax, idx)\n time.sleep(0.2)", "_____no_output_____" ] ], [ [ "**(Tarea) elegir una imagen y realizar interpolación con una curva paramétrica.**", "_____no_output_____" ], [ "**Referencias:**\n\n* [animated_matplotlib-binder](https://github.com/fomightez/animated_matplotlib-binder)\n\n* [how-get-a-x-y-position-pointing-with-mouse-in-a-interactive-plot-python](https://stackoverflow.com/questions/29379502/how-get-a-x-y-position-pointing-with-mouse-in-a-interactive-plot-python)\n\n* [matplotlib: invert_axes](https://matplotlib.org/3.1.1/gallery/subplots_axes_and_figures/invert_axes.html)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ] ]
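The interpolation notebook in the record above (written in Spanish) contrasts a single high-degree interpolating polynomial with piecewise cubic splines and points to the Runge function as an exercise. A small sketch of that comparison, with arbitrary point counts chosen only for illustration:

```python
import numpy as np
from scipy.interpolate import interp1d

f = lambda x: 1.0 / (1.0 + 25.0 * x**2)   # Runge's function
x = np.linspace(-1, 1, 10)                # 10 equidistant interpolation nodes
y = f(x)

coeffs = np.polyfit(x, y, len(x) - 1)     # degree-9 interpolating polynomial
spline = interp1d(x, y, kind="cubic")     # piecewise cubic spline

x_eval = np.linspace(-1, 1, 1000)
poly_err = np.max(np.abs(np.polyval(coeffs, x_eval) - f(x_eval)))
spline_err = np.max(np.abs(spline(x_eval) - f(x_eval)))
print(poly_err, spline_err)               # the polynomial error is dominated by Runge oscillation
```

The spline keeps its error small near the interval ends, which is exactly the behavior the notebook motivates when it warns about high-degree polynomial oscillation.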
c5306b2dc19177266a3105001929d8ae5314b4a7
15,287
ipynb
Jupyter Notebook
.ipynb_checkpoints/181115_counting_example-Copy1-checkpoint.ipynb
mchoimis/practical-data-science-with-hadoop-and-spark
f7d8eaa9b5974fe55fb14e5a9915e60a3957c237
[ "Apache-2.0" ]
null
null
null
.ipynb_checkpoints/181115_counting_example-Copy1-checkpoint.ipynb
mchoimis/practical-data-science-with-hadoop-and-spark
f7d8eaa9b5974fe55fb14e5a9915e60a3957c237
[ "Apache-2.0" ]
null
null
null
.ipynb_checkpoints/181115_counting_example-Copy1-checkpoint.ipynb
mchoimis/practical-data-science-with-hadoop-and-spark
f7d8eaa9b5974fe55fb14e5a9915e60a3957c237
[ "Apache-2.0" ]
null
null
null
31.008114
401
0.38935
[ [ [ "import os\nimport findspark\nfindspark.find()\nfindspark.init(os.environ.get(\"SPARK_HOME\"))\n\nimport sys\nsys.path.append(\"/Users/minjungchoi/spark/spark-2.4.0-bin-hadoop2.7/python/pyspark\")\n\nfrom pyspark import SparkConf\nfrom pyspark import SparkContext\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import *\nfrom pyspark.sql.functions import col, collect_list,udf,concat, lit\n\n\nconf = SparkConf().setAppName(\"comstat-test\").set(\"spark.yarn.driver.memoryOverhead\", \"2048\") \\\n .set(\"spark.yarn.executor.memoryOverhead\", \"2048\") \\\n .set(\"spark.default.parallelism\", \"116\") \\\n .set(\"spark.shuffle.compress\", \"true\") \\\n .set(\"spark.io.compression.codec\", \"snappy\")\n\nspark = SparkSession.builder.config(conf=conf).getOrCreate()\nsc = spark.sparkContext", "_____no_output_____" ], [ "sampleData = [\n\t('A','WD','1','Z001','S001'),\n\t('A','WD','1','Z002','S020'),\n\t('A','WD','2','Z005','S100'),\n\t('A','WE','3','Z001','S001'),\n\t('A','WE','2','Z002','S000'),\n\t('A','WE','1','Z001','S001'),\n\t('A','WD','3','Z001','S001'),\n\t('A','WD','4','Z001','S002'),\n\t('A','WD','4','Z002','S030'),\n\t('A','WD','3','Z003','S009'),\n\t('A','WD','1','Z001','S002'),\n\t('A','WD','2','Z002','S030'),\n\t('A','WD','3','Z001','S001'),\n\t('A','WD','4','Z003','S003'),\n\t('A','WD','4','Z003','S005'),\n\t('A','WD','4','Z001','S001'),\n\t('A','WD','3','Z005','S006'),\n\t('A','WE','2','Z006','S007'),\n\t('A','WE','3','Z001','S002'),\n\t('B','WD','1','Z001','S001'),\n\t('B','WD','1','Z002','S020'),\n\t('B','WD','2','Z005','S100'),\n\t('B','WE','3','Z001','S001'),\n\t('B','WE','2','Z002','S000'),\n\t('B','WE','1','Z001','S001'),\n\t('B','WD','3','Z001','S001'),\n\t('C','WD','4','Z001','S002'),\n\t('C','WD','4','Z002','S030'),\n\t('C','WD','3','Z003','S009'),\n\t('C','WD','1','Z001','S002'),\n\t('D','WD','2','Z002','S030'),\n\t('D','WD','3','Z001','S001'),\n\t('D','WD','4','Z003','S003'),\n\t('D','WD','4','Z003','S005'),\n\t('D','WD','4','Z001','S001'),\n\t('D','WD','3','Z005','S006'),\n\t('D','WE','2','Z006','S007'),\n\t('D','WE','3','Z001','S002'),\n\t('E','WE','1','Z003','S001'),\n ]\n\n \nfield = [\n\tStructField('CID',StringType(), True),\\\n\tStructField('WEEKDAY', StringType(), True),\\\n\tStructField('TIMESEG', StringType(), True),\\\n\tStructField('LOCATION', StringType(), True),\\\n \tStructField('SHOP', StringType(), True)]\n\nschema = StructType(field)\n\n\nsampleRDD = sc.parallelize(sampleData)\nsampleDF = spark.createDataFrame(sampleRDD,schema)\n\nprint('Table')\nprint(sampleDF.show())\n", "Table\n+---+-------+-------+--------+----+\n|CID|WEEKDAY|TIMESEG|LOCATION|SHOP|\n+---+-------+-------+--------+----+\n| A| WD| 1| Z001|S001|\n| A| WD| 1| Z002|S020|\n| A| WD| 2| Z005|S100|\n| A| WE| 3| Z001|S001|\n| A| WE| 2| Z002|S000|\n| A| WE| 1| Z001|S001|\n| A| WD| 3| Z001|S001|\n| A| WD| 4| Z001|S002|\n| A| WD| 4| Z002|S030|\n| A| WD| 3| Z003|S009|\n| A| WD| 1| Z001|S002|\n| A| WD| 2| Z002|S030|\n| A| WD| 3| Z001|S001|\n| A| WD| 4| Z003|S003|\n| A| WD| 4| Z003|S005|\n| A| WD| 4| Z001|S001|\n| A| WD| 3| Z005|S006|\n| A| WE| 2| Z006|S007|\n| A| WE| 3| Z001|S002|\n| B| WD| 1| Z001|S001|\n+---+-------+-------+--------+----+\nonly showing top 20 rows\n\nNone\n" ], [ "# cidByShopMap = sampleDF.rdd.map(lambda x : (x['CID'] +':'+ x['SHOP'], 1))\n\n# cidByShop = cidByShopMap.reduceByKey(lambda x,y : x+y)", "_____no_output_____" ], [ "# cidByShop.take(20)", "_____no_output_____" ], [ "#%%\ngetTop3List = udf(lambda x:x[0:3],StringType())", "_____no_output_____" ], [ 
"\n#%%\n#cross tab으로 보기\nsampleDF.crosstab('CID', 'SHOP').show()", "+--------+----+----+----+----+----+----+----+----+----+----+----+\n|CID_SHOP|S000|S001|S002|S003|S005|S006|S007|S009|S020|S030|S100|\n+--------+----+----+----+----+----+----+----+----+----+----+----+\n| E| 0| 1| 0| 0| 0| 0| 0| 0| 0| 0| 0|\n| A| 1| 6| 3| 1| 1| 1| 1| 1| 1| 2| 1|\n| B| 1| 4| 0| 0| 0| 0| 0| 0| 1| 0| 1|\n| C| 0| 0| 2| 0| 0| 0| 0| 1| 0| 1| 0|\n| D| 0| 2| 1| 1| 1| 1| 1| 0| 0| 1| 0|\n+--------+----+----+----+----+----+----+----+----+----+----+----+\n\n" ], [ "#%%\n#고객별 최다 방문 SHOP TOP3\nBySHOP = sampleDF.groupby('CID', 'SHOP').count().orderBy('CID', 'count',ascending=False)\n\nBySHOP.show()\n\nBySHOP = BySHOP.groupby('CID').agg(collect_list('SHOP').alias('SHOP3'))\\\n .withColumn('SHOP3', getTop3List('SHOP3'))\n\nBySHOP.show()", "+---+----+-----+\n|CID|SHOP|count|\n+---+----+-----+\n| E|S001| 1|\n| D|S001| 2|\n| D|S002| 1|\n| D|S030| 1|\n| D|S006| 1|\n| D|S005| 1|\n| D|S003| 1|\n| D|S007| 1|\n| C|S002| 2|\n| C|S030| 1|\n| C|S009| 1|\n| B|S001| 4|\n| B|S100| 1|\n| B|S020| 1|\n| B|S000| 1|\n| A|S001| 6|\n| A|S002| 3|\n| A|S030| 2|\n| A|S005| 1|\n| A|S007| 1|\n+---+----+-----+\nonly showing top 20 rows\n\n+---+------------------+\n|CID| SHOP3|\n+---+------------------+\n| E| [S001]|\n| B|[S001, S020, S100]|\n| D|[S001, S003, S007]|\n| C|[S002, S009, S030]|\n| A|[S001, S002, S030]|\n+---+------------------+\n\n" ], [ "#%%\n#고객별 최다 방문 지역 TOP3\nByLOCATION = sampleDF.groupby('CID', 'LOCATION').count().orderBy('CID', 'count',ascending=False)\n\nByLOCATION.show()\n\nByLOCATION = ByLOCATION.groupby('CID').agg(collect_list('LOCATION').alias('Location3'))\\\n .withColumn('Location3', getTop3List('Location3'))\n\nByLOCATION.show()", "+---+--------+-----+\n|CID|LOCATION|count|\n+---+--------+-----+\n| E| Z003| 1|\n| D| Z001| 3|\n| D| Z003| 2|\n| D| Z005| 1|\n| D| Z002| 1|\n| D| Z006| 1|\n| C| Z001| 2|\n| C| Z002| 1|\n| C| Z003| 1|\n| B| Z001| 4|\n| B| Z002| 2|\n| B| Z005| 1|\n| A| Z001| 9|\n| A| Z002| 4|\n| A| Z003| 3|\n| A| Z005| 2|\n| A| Z006| 1|\n+---+--------+-----+\n\n+---+------------------+\n|CID| Location3|\n+---+------------------+\n| E| [Z003]|\n| B|[Z001, Z002, Z005]|\n| D|[Z001, Z003, Z006]|\n| C|[Z001, Z003, Z002]|\n| A|[Z001, Z002, Z003]|\n+---+------------------+\n\n" ], [ "#%%\n#고객별 최다 방문 주중/주말 방문 SHOP TOP3\nByWEEKDAY_SHOP = sampleDF.groupby('CID', 'WEEKDAY', 'SHOP').count().orderBy('CID', 'count',ascending=False)\\\n .select('CID', (concat(col('WEEKDAY') ,lit('|'),col('SHOP')).alias('WEEKDAY_SHOP')), 'count')\nByWEEKDAY_SHOP.show()\nByWEEKDAY_SHOP = ByWEEKDAY_SHOP.groupby('CID').agg(collect_list('WEEKDAY_SHOP').alias('WEEKDAY_SHOP3'))\\\n .withColumn('WEEKDAY_SHOP3', getTop3List('WEEKDAY_SHOP3'))\n\n\nByWEEKDAY_SHOP.show()", "+---+------------+-----+\n|CID|WEEKDAY_SHOP|count|\n+---+------------+-----+\n| E| WE|S001| 1|\n| D| WD|S001| 2|\n| D| WE|S002| 1|\n| D| WD|S005| 1|\n| D| WD|S030| 1|\n| D| WE|S007| 1|\n| D| WD|S006| 1|\n| D| WD|S003| 1|\n| C| WD|S002| 2|\n| C| WD|S009| 1|\n| C| WD|S030| 1|\n| B| WE|S001| 2|\n| B| WD|S001| 2|\n| B| WD|S020| 1|\n| B| WD|S100| 1|\n| B| WE|S000| 1|\n| A| WD|S001| 4|\n| A| WD|S002| 2|\n| A| WE|S001| 2|\n| A| WD|S030| 2|\n+---+------------+-----+\nonly showing top 20 rows\n\n+---+--------------------+\n|CID| WEEKDAY_SHOP3|\n+---+--------------------+\n| E| [WE|S001]|\n| B|[WD|S001, WE|S001...|\n| D|[WD|S001, WD|S005...|\n| C|[WD|S002, WD|S030...|\n| A|[WD|S001, WD|S002...|\n+---+--------------------+\n\n" ], [ "#%%\n#고객별로 한테이블로 모으자 (굳이 모을 필요는 없고 각자 테이블로 가지고 있는게 나을 걸?)\n#별도의 
고객리스트가 없으면 트랜잭션에서 빼내자\nusers = sampleDF.select('CID').distinct()\n\ntotalSummary = users.join(BySHOP, BySHOP.CID == users.CID, how='left').drop(BySHOP.CID)\n\ntotalSummary = totalSummary.join(ByLOCATION, ByLOCATION.CID == totalSummary.CID, how='left').drop(ByLOCATION.CID)\ntotalSummary = totalSummary.join(ByWEEKDAY_SHOP, ByWEEKDAY_SHOP.CID == totalSummary.CID, how='left').drop(ByWEEKDAY_SHOP.CID)\ntotalSummary.show()\n", "+---+------------------+------------------+--------------------+\n|CID| SHOP3| Location3| WEEKDAY_SHOP3|\n+---+------------------+------------------+--------------------+\n| E| [S001]| [Z003]| [WE|S001]|\n| B|[S001, S020, S100]|[Z001, Z002, Z005]|[WD|S001, WE|S001...|\n| D|[S001, S003, S007]|[Z001, Z003, Z006]|[WD|S001, WD|S005...|\n| C|[S002, S009, S030]|[Z001, Z003, Z002]|[WD|S002, WD|S030...|\n| A|[S001, S002, S030]|[Z001, Z002, Z003]|[WD|S001, WD|S002...|\n+---+------------------+------------------+--------------------+\n\n" ], [ "#%%\n## map reduce로 그냥 구현\ncidByShopMap = sampleDF.rdd.map(lambda x : (x['CID'] +':'+ x['SHOP'], 1))\n\ncidByShop = cidByShopMap.reduceByKey(lambda x,y : x+y)\n\nprint('Customer by shop')\nprint(cidByShop.take(50))\n\nprint('Customer by shop TOP 3')\nprint( cidByShop.sortBy(lambda x: x[1], ascending= False).take(3) )", "Customer by shop\n[('C:S002', 2), ('A:S030', 2), ('A:S002', 3), ('A:S007', 1), ('D:S002', 1), ('B:S100', 1), ('D:S001', 2), ('D:S006', 1), ('D:S007', 1), ('A:S005', 1), ('A:S009', 1), ('A:S020', 1), ('D:S030', 1), ('D:S005', 1), ('A:S100', 1), ('B:S020', 1), ('D:S003', 1), ('B:S000', 1), ('A:S006', 1), ('A:S001', 6), ('B:S001', 4), ('A:S003', 1), ('C:S030', 1), ('C:S009', 1), ('E:S001', 1), ('A:S000', 1)]\nCustomer by shop TOP 3\n[('A:S001', 6), ('B:S001', 4), ('A:S002', 3)]\n" ], [ "#%%\nsc.stop()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
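The PySpark counting notebook in the record above builds per-customer top-3 lists with `groupBy`/`collect_list` and a `reduceByKey` pass. The same count-then-take-top-3 idea can be illustrated without a Spark cluster using plain Python on toy tuples; the customer and shop codes below are placeholders.

```python
from collections import Counter, defaultdict

# toy (customer, shop) visit records, analogous to the notebook's sample rows
visits = [("A", "S001"), ("A", "S001"), ("A", "S002"),
          ("B", "S001"), ("B", "S020"), ("A", "S001")]

counts = defaultdict(Counter)
for cid, shop in visits:
    counts[cid][shop] += 1  # plays the role of reduceByKey(lambda x, y: x + y)

top3 = {cid: [shop for shop, _ in c.most_common(3)] for cid, c in counts.items()}
print(top3)  # e.g. {'A': ['S001', 'S002'], 'B': ['S001', 'S020']}
```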
c5306e75b7bf2e663e90b22c9581b82f9e2f1c5e
384,659
ipynb
Jupyter Notebook
BERT/Classified movies/Movies_results/Mdata-ovr.ipynb
luqmanjamilch/BERT_ExtremeSentiments
2bc487dbe976a76069e8fed2db3562ab24b06157
[ "MIT" ]
null
null
null
BERT/Classified movies/Movies_results/Mdata-ovr.ipynb
luqmanjamilch/BERT_ExtremeSentiments
2bc487dbe976a76069e8fed2db3562ab24b06157
[ "MIT" ]
null
null
null
BERT/Classified movies/Movies_results/Mdata-ovr.ipynb
luqmanjamilch/BERT_ExtremeSentiments
2bc487dbe976a76069e8fed2db3562ab24b06157
[ "MIT" ]
null
null
null
210.426149
121,904
0.898552
[ [ [ "import transformers\nfrom transformers import BertModel, BertTokenizer, AdamW, get_linear_schedule_with_warmup\nimport torch\nimport gc\ngc.collect()\ntorch.cuda.empty_cache()\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom pylab import rcParams\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix, classification_report\nfrom collections import defaultdict\nfrom textwrap import wrap\nfrom torch import nn, optim\nfrom torch.utils.data import Dataset, DataLoader\nimport torch.nn.functional as F\nRANDOM_SEED = 42\nnp.random.seed(RANDOM_SEED)\ntorch.manual_seed(RANDOM_SEED)\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\ndevice ", "_____no_output_____" ], [ "df_movies = pd.read_csv('datasets/classifiedMovies.txt', sep=\"\\t\", header=None, low_memory=False)", "_____no_output_____" ], [ "df_movies.columns = [\"message\",\t\"originalClassification\",\"scoreP\",\"scoreZ\",\"polarity\",\t\"termsP\",\t\"termsN\"]", "_____no_output_____" ], [ "df = df_movies", "_____no_output_____" ], [ "df['message_len'] = df['message'].astype(str).apply(len)", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 31986 entries, 0 to 31985\nData columns (total 8 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 message 31986 non-null object \n 1 originalClassification 31986 non-null object \n 2 scoreP 31986 non-null float64\n 3 scoreZ 31986 non-null float64\n 4 polarity 31986 non-null object \n 5 termsP 5079 non-null object \n 6 termsN 987 non-null object \n 7 message_len 31986 non-null int64 \ndtypes: float64(2), int64(1), object(5)\nmemory usage: 2.0+ MB\n" ], [ "df.polarity.value_counts()", "_____no_output_____" ], [ "df.describe()", "_____no_output_____" ], [ "df[\"message_len\"].describe().apply(lambda x: format(x, 'f'))", "_____no_output_____" ], [ "sns.displot( df , x= \"message_len\" );", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df.originalClassification.value_counts()", "_____no_output_____" ], [ "df.polarity.value_counts()", "_____no_output_____" ], [ "sns.countplot(df.polarity)\nplt.xlabel('classification');", "/usr/local/lib/python3.6/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n FutureWarning\n" ], [ "df['label'] = pd.factorize(df['polarity'])[0]", "_____no_output_____" ], [ "sns.countplot(df.label)\nplt.xlabel('classification');", "/usr/local/lib/python3.6/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variable as a keyword arg: x. 
From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n FutureWarning\n" ], [ "df['label'] = pd.factorize(df['polarity'])[0]", "_____no_output_____" ], [ "# Class count\ncount_class_0, count_class_1,count_class_2 = df.polarity.value_counts()\n\n# Divide by class\ndf_class_0 = df[df['label'] == 0]\ndf_class_1 = df[df['label'] == 1]\ndf_class_2 = df[df['label'] == 2]", "_____no_output_____" ], [ "df_class_2_over = pd.concat([df_class_2]*20, ignore_index=False)", "_____no_output_____" ], [ "df_class_2_over", "_____no_output_____" ], [ "df_class_0_under = df_class_0.sample(count_class_1)\ndf_class_2_over = df_class_2.sample(n=5596, replace=True)\n\ndf_test_over = pd.concat([df_class_1, df_class_0_under,df_class_2_over], axis=0)\n\nprint('Random over-sampling:')\nprint(df_test_over.polarity.value_counts())\n\ndf_test_overdf_test_over.polarity.value_counts().plot(kind='bar', title='Count (polarity)');", "Random over-sampling:\nNegative Extreme 5596\nPositive Extreme 5596\nInconclusive 5596\nName: polarity, dtype: int64\n" ], [ "test_df_rest =pd.merge(df,df_test_over, indicator=True, how='outer').query('_merge==\"left_only\"').drop('_merge', axis=1)", "_____no_output_____" ], [ "test_df_rest.label.value_counts()", "_____no_output_____" ], [ "test_df_rest = pd.concat([test_df_rest, df_class_1,df_class_2], axis=0)", "_____no_output_____" ], [ "test_df_rest.label.value_counts()", "_____no_output_____" ], [ "PRE_TRAINED_MODEL_NAME = 'bert-base-cased'", "_____no_output_____" ], [ "tokenizer = BertTokenizer.from_pretrained(PRE_TRAINED_MODEL_NAME, return_dict=False)", "_____no_output_____" ], [ "sample_txt = 'When was I last outside? I am stuck at home for 2 weeks.'", "_____no_output_____" ], [ "encoding = tokenizer.encode_plus(\nsample_txt,\nmax_length=32, # sequence length\nadd_special_tokens=True, # Add '[CLS]' and '[SEP]'\nreturn_token_type_ids=False,\npadding='max_length',\nreturn_attention_mask=True,\nreturn_tensors='pt', # Return PyTorch tensors(use tf for tensorflow and keras)\n)\nencoding.keys()", "_____no_output_____" ], [ "token_lens = []\nfor txt in df.message:\n tokens = tokenizer.encode(txt, truncation=True)\n token_lens.append(len(tokens))", "_____no_output_____" ], [ "%matplotlib inline\n%config InlineBackend.figure_format='retina'\nsns.set(style='whitegrid', palette='muted', font_scale=1.2)\nHAPPY_COLORS_PALETTE = [\"#01BEFE\", \"#FFDD00\", \"#FF7D00\", \"#FF006D\", \"#ADFF02\", \"#8F00FF\"]\nsns.set_palette(sns.color_palette(HAPPY_COLORS_PALETTE))\nrcParams['figure.figsize'] = 12, 8", "_____no_output_____" ], [ "sns.distplot(token_lens)\nplt.xlim([0, 256]);\nplt.xlabel('Token count');", "/usr/local/lib/python3.6/dist-packages/seaborn/distributions.py:2557: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. 
Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).\n warnings.warn(msg, FutureWarning)\n" ], [ "MAX_LEN = 90", "_____no_output_____" ], [ "class ExtremsSentiDataset(Dataset):\n\n def __init__(self, reviews, targets, tokenizer, max_len):\n self.reviews = reviews\n self.targets = targets\n self.tokenizer = tokenizer\n self.max_len = max_len\n def __len__(self):\n return len(self.reviews)\n def __getitem__(self, item):\n\n review = str(self.reviews[item])\n target = self.targets[item]\n encoding = self.tokenizer.encode_plus(\n review,\n add_special_tokens=True,\n max_length=self.max_len,\n return_token_type_ids=False,\n padding='max_length',\n return_attention_mask=True,\n return_tensors='pt',\n )\n return {\n 'review_text': review,\n 'input_ids': encoding['input_ids'].flatten(),\n 'attention_mask': encoding['attention_mask'].flatten(),\n 'targets': torch.tensor(target, dtype=torch.long)\n }", "_____no_output_____" ], [ "df_train, df_test = train_test_split(\n df_test_over,\n test_size=0.2,\n random_state=RANDOM_SEED\n)\ndf_val, df_test = train_test_split(\n df_test,\n test_size=0.5,\n random_state=RANDOM_SEED\n)", "_____no_output_____" ], [ "df_train.shape, df_val.shape, df_test.shape", "_____no_output_____" ], [ "def create_data_loader(df, tokenizer, max_len, batch_size):\n ds = ExtremsSentiDataset(\n reviews=df.message.to_numpy(),\n targets=df.label.to_numpy(),\n tokenizer=tokenizer,\n max_len=max_len\n )\n return DataLoader(\n ds,\n batch_size=batch_size,\n num_workers=2\n )", "_____no_output_____" ], [ "rest_df = pd.merge(a,b, indicator=True, how='outer').query('_merge==\"left_only\"').drop('_merge', axis=1)", "_____no_output_____" ], [ "#rest_test = df1['Email'].isin(df2['Email'])\n#df1.drop(df1[cond].index, inplace = True)", "_____no_output_____" ], [ "BATCH_SIZE = 16\ntrain_data_loader = create_data_loader(df_train, tokenizer, MAX_LEN, BATCH_SIZE)\nval_data_loader = create_data_loader(df_val, tokenizer, MAX_LEN, BATCH_SIZE)\ntest_data_loader = create_data_loader(df_test, tokenizer, MAX_LEN, BATCH_SIZE)\nall_test_data_loader = create_data_loader(test_df_rest, tokenizer, MAX_LEN, BATCH_SIZE)", "_____no_output_____" ], [ "data = next(iter(train_data_loader))\ndata.keys()\n", "_____no_output_____" ], [ "bert_model = BertModel.from_pretrained(PRE_TRAINED_MODEL_NAME)", "_____no_output_____" ], [ "last_hidden_state, pooled_output = bert_model(\n input_ids=encoding['input_ids'],\n attention_mask=encoding['attention_mask'],\n return_dict=False\n)", "_____no_output_____" ], [ "encoding['input_ids']", "_____no_output_____" ], [ "last_hidden_state.shape", "_____no_output_____" ], [ "bert_model.config.hidden_size", "_____no_output_____" ], [ "pooled_output.shape", "_____no_output_____" ], [ "class SentimentClassifier(nn.Module):\n def __init__(self, n_classes):\n super(SentimentClassifier, self).__init__()\n self.bert = BertModel.from_pretrained(PRE_TRAINED_MODEL_NAME, return_dict=False)\n self.drop = nn.Dropout(p=0.3)\n self.out = nn.Linear(self.bert.config.hidden_size, n_classes)\n def forward(self, input_ids, attention_mask):\n _, pooled_output = self.bert(\n input_ids=input_ids,\n attention_mask=attention_mask\n )\n output = self.drop(pooled_output)\n return self.out(output)", "_____no_output_____" ], [ "class_names = [\"Inconclusive\",\"Positive Extreme\",\"Negative Extreme\"]", "_____no_output_____" ], [ "model = SentimentClassifier(len(class_names))\nmodel = model.to(device)", 
"_____no_output_____" ], [ "input_ids = data['input_ids'].to(device)\nattention_mask = data['attention_mask'].to(device)\nprint(input_ids.shape) # batch size x seq length\nprint(attention_mask.shape) # batch size x seq length", "torch.Size([16, 90])\ntorch.Size([16, 90])\n" ], [ "F.softmax(model(input_ids, attention_mask), dim=1)", "_____no_output_____" ], [ "EPOCHS = 10\noptimizer = AdamW(model.parameters(), lr=2e-5, correct_bias=False)\ntotal_steps = len(train_data_loader) * EPOCHS\nscheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=0,\n num_training_steps=total_steps\n)\nloss_fn = nn.CrossEntropyLoss().to(device)", "_____no_output_____" ], [ "def train_epoch(\n model,\n data_loader,\n loss_fn,\n optimizer,\n device,\n scheduler,\n n_examples\n):\n model = model.train()\n losses = []\n correct_predictions = 0\n for d in data_loader:\n input_ids = d[\"input_ids\"].to(device)\n attention_mask = d[\"attention_mask\"].to(device)\n targets = d[\"targets\"].to(device)\n outputs = model(\n input_ids=input_ids,\n attention_mask=attention_mask\n )\n _, preds = torch.max(outputs, dim=1)\n loss = loss_fn(outputs, targets)\n correct_predictions += torch.sum(preds == targets)\n losses.append(loss.item())\n loss.backward()\n nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)\n optimizer.step()\n scheduler.step()\n optimizer.zero_grad()\n return correct_predictions.double() / n_examples, np.mean(losses)", "_____no_output_____" ], [ "def eval_model(model, data_loader, loss_fn, device, n_examples):\n model = model.eval()\n losses = []\n correct_predictions = 0\n with torch.no_grad():\n for d in data_loader:\n input_ids = d[\"input_ids\"].to(device)\n attention_mask = d[\"attention_mask\"].to(device)\n targets = d[\"targets\"].to(device)\n outputs = model(\n input_ids=input_ids,\n attention_mask=attention_mask\n )\n _, preds = torch.max(outputs, dim=1)\n loss = loss_fn(outputs, targets)\n correct_predictions += torch.sum(preds == targets)\n losses.append(loss.item())\n return correct_predictions.double() / n_examples, np.mean(losses)", "_____no_output_____" ], [ "%%time\nhistory = defaultdict(list)\nbest_accuracy = 0\nfor epoch in range(EPOCHS):\n print(f'Epoch {epoch + 1}/{EPOCHS}')\n print('-' * 10)\n train_acc, train_loss = train_epoch(\n model,\n train_data_loader,\n loss_fn,\n optimizer,\n device,\n scheduler,\n len(df_train)\n )\n print(f'Train loss {train_loss} accuracy {train_acc}')\n val_acc, val_loss = eval_model(\n model,\n val_data_loader,\n loss_fn,\n device,\n len(df_val)\n )\n print(f'Val loss {val_loss} accuracy {val_acc}')\n print()\n history['train_acc'].append(train_acc)\n history['train_loss'].append(train_loss)\n history['val_acc'].append(val_acc)\n history['val_loss'].append(val_loss)\n if val_acc > best_accuracy:\n torch.save(model.state_dict(), 'best_model_state.bin')\n best_accuracy = val_acc", "Epoch 1/10\n----------\nTrain loss 0.45627013824081847 accuracy 0.8175725986597171\nVal loss 0.23234413550013588 accuracy 0.9326980345443717\n\nEpoch 2/10\n----------\nTrain loss 0.1814330510276791 accuracy 0.950186150409531\nVal loss 0.18690529101129089 accuracy 0.9523525908278738\n\nEpoch 3/10\n----------\nTrain loss 0.1430551942867515 accuracy 0.9620253164556963\nVal loss 0.15866826798412062 accuracy 0.9583085169743896\n\nEpoch 4/10\n----------\nTrain loss 0.13114961613402037 accuracy 0.9642591213700671\nVal loss 0.16053591986142454 accuracy 0.9600952948183443\n\nEpoch 5/10\n----------\nTrain loss 0.1256366976622736 accuracy 0.964854802680566\nVal loss 
0.15207734275609255 accuracy 0.9612864800476475\n\nEpoch 6/10\n----------\nTrain loss 0.12546040204719508 accuracy 0.9650037230081907\nVal loss 0.15133400298655034 accuracy 0.9630732578916023\n\nEpoch 7/10\n----------\nTrain loss 0.12506964649933036 accuracy 0.9650037230081907\nVal loss 0.174538921511599 accuracy 0.9594997022036927\n\nEpoch 8/10\n----------\nTrain loss 0.12070944725059062 accuracy 0.9655994043186896\nVal loss 0.1822477352450646 accuracy 0.957712924359738\n\nEpoch 9/10\n----------\nTrain loss 0.12209694708026723 accuracy 0.9653760238272525\nVal loss 0.17848169549944856 accuracy 0.9583085169743896\n\nEpoch 10/10\n----------\nTrain loss 0.12178017088209302 accuracy 0.9657483246463143\nVal loss 0.1776362424627656 accuracy 0.9612864800476475\n\nCPU times: user 1h 1min 57s, sys: 25min 16s, total: 1h 27min 14s\nWall time: 1h 27min 26s\n" ], [ "plt.plot(history['train_acc'], label='train accuracy')\nplt.plot(history['val_acc'], label='validation accuracy')\nplt.title('Training history')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nplt.legend()\nplt.ylim([0, 1]);", "_____no_output_____" ], [ "test_acc, _ = eval_model(\n model,\n test_data_loader,\n loss_fn,\n device,\n len(test_df_rest)\n)\ntest_acc.item()\n", "_____no_output_____" ], [ "def get_predictions(model, data_loader):\n model = model.eval()\n review_texts = []\n predictions = []\n prediction_probs = []\n real_values = []\n with torch.no_grad():\n for d in data_loader:\n texts = d[\"review_text\"]\n input_ids = d[\"input_ids\"].to(device)\n attention_mask = d[\"attention_mask\"].to(device)\n targets = d[\"targets\"].to(device)\n outputs = model(\n input_ids=input_ids,\n attention_mask=attention_mask\n )\n _, preds = torch.max(outputs, dim=1)\n review_texts.extend(texts)\n predictions.extend(preds)\n prediction_probs.extend(outputs)\n real_values.extend(targets)\n predictions = torch.stack(predictions).cpu()\n prediction_probs = torch.stack(prediction_probs).cpu()\n real_values = torch.stack(real_values).cpu()\n return review_texts, predictions, prediction_probs, real_values", "_____no_output_____" ], [ "y_review_texts, y_pred, y_pred_probs, y_test = get_predictions(\n model,\n all_test_data_loader\n)", "_____no_output_____" ], [ "print(classification_report(y_test, y_pred, target_names=class_names))\n", " precision recall f1-score support\n\n Inconclusive 1.00 0.98 0.99 12678\nPositive Extreme 0.96 0.90 0.93 5596\nNegative Extreme 0.30 1.00 0.46 282\n\n accuracy 0.95 18556\n macro avg 0.75 0.96 0.79 18556\n weighted avg 0.98 0.95 0.96 18556\n\n" ], [ "def show_confusion_matrix(confusion_matrix):\n hmap = sns.heatmap(confusion_matrix, annot=True, fmt=\"d\", cmap=\"Blues\")\n hmap.yaxis.set_ticklabels(hmap.yaxis.get_ticklabels(), rotation=0, ha='right')\n hmap.xaxis.set_ticklabels(hmap.xaxis.get_ticklabels(), rotation=30, ha='right')\n plt.ylabel('True sentiment')\n plt.xlabel('Predicted sentiment');\ncm = confusion_matrix(y_test, y_pred)\ndf_cm = pd.DataFrame(cm, index=class_names, columns=class_names)\nshow_confusion_matrix(df_cm)", "_____no_output_____" ], [ "idx = 5\nreview_text = y_review_texts[idx]\ntrue_sentiment = y_test[idx]\npred_df = pd.DataFrame({\n 'class_names': class_names,\n 'values': y_pred_probs[idx]\n})", "_____no_output_____" ], [ "print(\"\\n\".join(wrap(review_text)))\nprint()\nprint(f'True sentiment: {class_names[true_sentiment]}')", "the acting , costumes , music , cinematography and sound are all\nastounding given the production's austere locales .\n\nTrue sentiment: Positive Extreme\n" ], [ 
"sns.barplot(x='values', y='class_names', data=pred_df, orient='h')\nplt.ylabel('sentiment')\nplt.xlabel('probability')\nplt.xlim([0, 1]);", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c530716de4f5b212e74093550a79a4a771559b00
1,617
ipynb
Jupyter Notebook
.ipynb_checkpoints/scrape_mars.py-checkpoint.ipynb
ankita-03/web-scraping-project
cf8050c2a828f316b65de2d0f4e3b8e7d461d216
[ "ADSL" ]
null
null
null
.ipynb_checkpoints/scrape_mars.py-checkpoint.ipynb
ankita-03/web-scraping-project
cf8050c2a828f316b65de2d0f4e3b8e7d461d216
[ "ADSL" ]
null
null
null
.ipynb_checkpoints/scrape_mars.py-checkpoint.ipynb
ankita-03/web-scraping-project
cf8050c2a828f316b65de2d0f4e3b8e7d461d216
[ "ADSL" ]
null
null
null
21.851351
116
0.547928
[ [ [ "import pandas as pd\nfrom splinter import Browser\nfrom webdriver_manager.chrome import ChromeDriverManager \nfrom bs4 import BeautifulSoup as soup ", "_____no_output_____" ], [ "executable_path = {'executable_path': ChromeDriverManager().install()}\nbrowser = Browser('chrome', **executable_path, headless=True)", "\n\n====== WebDriver manager ======\nCurrent google-chrome version is 96.0.4664\nGet LATEST chromedriver version for 96.0.4664 google-chrome\nDriver [C:\\Users\\ASG\\.wdm\\drivers\\chromedriver\\win32\\96.0.4664.45\\chromedriver.exe] found in cache\n" ], [ "def scrape: \n ", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
c53076e5d40230813c243a3f7193ab83202260c4
670,970
ipynb
Jupyter Notebook
Notebooks/Model Building.ipynb
mbrown-amc/NFL-Positional-Spending-Analysis-from-2017-2020
8866c7453abd4b9fb1b1445dd6115a2a6d706b9a
[ "MIT" ]
null
null
null
Notebooks/Model Building.ipynb
mbrown-amc/NFL-Positional-Spending-Analysis-from-2017-2020
8866c7453abd4b9fb1b1445dd6115a2a6d706b9a
[ "MIT" ]
null
null
null
Notebooks/Model Building.ipynb
mbrown-amc/NFL-Positional-Spending-Analysis-from-2017-2020
8866c7453abd4b9fb1b1445dd6115a2a6d706b9a
[ "MIT" ]
null
null
null
108.888348
39,600
0.765101
[ [ [ "# Importing libraries and utils", "_____no_output_____" ] ], [ [ "import utils\nimport pandas as pd", "_____no_output_____" ] ], [ [ "# Loading the data", "_____no_output_____" ] ], [ [ "offense, defense = utils.get_data(\"stats\")\nsalary = utils.get_data(\"salary\")\nAFC, NFC = utils.get_data(\"wins\")", "_____no_output_____" ] ], [ [ "# Verifying the data loaded correctly", "_____no_output_____" ] ], [ [ "offense[2]", "_____no_output_____" ], [ "defense[3]", "_____no_output_____" ], [ "salary", "_____no_output_____" ], [ "AFC[0]", "_____no_output_____" ], [ "NFC[2]", "_____no_output_____" ] ], [ [ "# Cleaning the data", "_____no_output_____" ] ], [ [ "Salary = utils.clean_data(\"salary\", test = salary)", "_____no_output_____" ], [ "Stats = utils.clean_data(\"stats\", offense = offense, defense = defense)", "_____no_output_____" ], [ "Wins = utils.clean_data(\"wins\", AFCl = AFC, NFCl = NFC)", "_____no_output_____" ] ], [ [ "# Verifying the data cleaned correctly", "_____no_output_____" ] ], [ [ "Salary", "_____no_output_____" ], [ "Stats", "_____no_output_____" ], [ "Wins", "_____no_output_____" ] ], [ [ "# Beginning cluster analysis", "_____no_output_____" ] ], [ [ "CSalary = Salary.drop([\"YEAR\", \"TEAM\"], axis = 1)", "_____no_output_____" ], [ "utils.find_clusters(CSalary)", "C:\\Users\\Michael\\anaconda3\\lib\\site-packages\\sklearn\\utils\\deprecation.py:143: FutureWarning: The sklearn.metrics.classification module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.metrics. Anything that cannot be imported from sklearn.metrics is now part of the private API.\n warnings.warn(message, FutureWarning)\nC:\\Users\\Michael\\anaconda3\\lib\\site-packages\\yellowbrick\\utils\\kneed.py:182: YellowbrickWarning: No \"knee\" or \"elbow point\" detected This could be due to bad clustering, no actual clusters being formed etc.\n warnings.warn(warning_message, YellowbrickWarning)\nC:\\Users\\Michael\\anaconda3\\lib\\site-packages\\yellowbrick\\utils\\kneed.py:140: YellowbrickWarning: No 'knee' or 'elbow point' detected This could be due to bad clustering, no actual clusters being formed etc.\n warnings.warn(warning_message, YellowbrickWarning)\nC:\\Users\\Michael\\anaconda3\\lib\\site-packages\\yellowbrick\\cluster\\elbow.py:343: YellowbrickWarning: No 'knee' or 'elbow' point detected, pass `locate_elbow=False` to remove the warning\n warnings.warn(warning_message, YellowbrickWarning)\n" ], [ "SCSalary = utils.scale_data(CSalary)", "_____no_output_____" ], [ "utils.find_clusters(SCSalary)", "C:\\Users\\Michael\\anaconda3\\lib\\site-packages\\yellowbrick\\utils\\kneed.py:182: YellowbrickWarning: No \"knee\" or \"elbow point\" detected This could be due to bad clustering, no actual clusters being formed etc.\n warnings.warn(warning_message, YellowbrickWarning)\nC:\\Users\\Michael\\anaconda3\\lib\\site-packages\\yellowbrick\\utils\\kneed.py:140: YellowbrickWarning: No 'knee' or 'elbow point' detected This could be due to bad clustering, no actual clusters being formed etc.\n warnings.warn(warning_message, YellowbrickWarning)\nC:\\Users\\Michael\\anaconda3\\lib\\site-packages\\yellowbrick\\cluster\\elbow.py:343: YellowbrickWarning: No 'knee' or 'elbow' point detected, pass `locate_elbow=False` to remove the warning\n warnings.warn(warning_message, YellowbrickWarning)\n" ], [ "#The scores after scaling are significantly worse. 
Considering that all of the salary numbers are in the same unit (% of cap), maybe it is best not to scale here afterall\n", "_____no_output_____" ] ], [ [ "# Using PCA", "_____no_output_____" ] ], [ [ "utils.pca_exp_var(CSalary)", "_____no_output_____" ], [ "PCSalary = utils.pca(CSalary, .99)", "the explained variance ratio is 0.9930379970380513\nThe shape of the original data is (128, 22)\nThe shape after pca is (128, 17)\n" ], [ "utils.find_clusters(PCSalary)", "C:\\Users\\Michael\\anaconda3\\lib\\site-packages\\yellowbrick\\utils\\kneed.py:182: YellowbrickWarning: No \"knee\" or \"elbow point\" detected This could be due to bad clustering, no actual clusters being formed etc.\n warnings.warn(warning_message, YellowbrickWarning)\nC:\\Users\\Michael\\anaconda3\\lib\\site-packages\\yellowbrick\\utils\\kneed.py:140: YellowbrickWarning: No 'knee' or 'elbow point' detected This could be due to bad clustering, no actual clusters being formed etc.\n warnings.warn(warning_message, YellowbrickWarning)\nC:\\Users\\Michael\\anaconda3\\lib\\site-packages\\yellowbrick\\cluster\\elbow.py:343: YellowbrickWarning: No 'knee' or 'elbow' point detected, pass `locate_elbow=False` to remove the warning\n warnings.warn(warning_message, YellowbrickWarning)\n" ], [ "# 6 clusters appears to be a good choice for silhouette score, I choose this number", "_____no_output_____" ] ], [ [ "# Clustering the data using KMeans", "_____no_output_____" ] ], [ [ "clusters = utils.cluster_data(PCSalary, 6)", "_____no_output_____" ], [ "clusters", "_____no_output_____" ] ], [ [ "# Adding the cluster assignments to the unscaled data for easier interpretation", "_____no_output_____" ] ], [ [ "SalaryClustered = utils.add_clusters(Salary, clusters)", "_____no_output_____" ], [ "SalaryClustered", "_____no_output_____" ] ], [ [ "# Graphing components from PCA", "_____no_output_____" ] ], [ [ "pcadf = pd.DataFrame(PCSalary)\n\npcadf.columns = (\"PC1\", \"PC2\",\"PC3\", \"PC4\", \"PC5\", \"PC6\", \"PC7\", \"PC8\", \"PC9\", \"PC10\", \"PC11\", \"PC12\", \"PC13\", \"PC14\", \"PC15\", \"PC16\", \"PC17\")\n\npcadf = utils.add_clusters(pcadf, clusters)\n\ncluster0, cluster1, cluster2, cluster3, cluster4, cluster5 = utils.break_clusters(pcadf)", "_____no_output_____" ], [ "utils.plot(cluster0, cluster1, cluster2, cluster3, cluster4, cluster5, \"PC1\", \"PC2\", \"Component 1\", \"Component 2\", \"Cluster 0\", \"Cluster 1\", \"Cluster 2\", \"Cluster 3\", \"Cluster 4\", \"Cluster 5\")", "_____no_output_____" ], [ "utils.plot(cluster0, cluster1, cluster2, cluster3, cluster4, cluster5, \"PC2\", \"PC3\", \"Component 2\", \"Component 3\", \"Cluster 0\", \"Cluster 1\", \"Cluster 2\", \"Cluster 3\", \"Cluster 4\", \"Cluster 5\")", "_____no_output_____" ] ], [ [ "# Examining the clustered salary data", "_____no_output_____" ] ], [ [ "SalaryClustered.groupby([\"cluster\"]).count()", "_____no_output_____" ], [ "SalaryClustered.groupby([\"cluster\"]).mean()", "_____no_output_____" ], [ "SalaryClustered.groupby([\"cluster\"]).std()", "_____no_output_____" ], [ "SalaryClustered[\"Offense\"] = SalaryClustered[\"QB\"] + SalaryClustered[\"RB\"] + SalaryClustered[\"FB\"] + SalaryClustered[\"WR\"] + SalaryClustered[\"TE\"] + SalaryClustered[\"T\"] + SalaryClustered[\"RT\"] + SalaryClustered[\"LT\"] + SalaryClustered[\"G\"] + SalaryClustered[\"C\"]\nSalaryClustered[\"Defense\"] = SalaryClustered[\"DE\"] + SalaryClustered[\"DT\"] + SalaryClustered[\"OLB\"] + SalaryClustered[\"ILB\"] + SalaryClustered[\"LB\"] + SalaryClustered[\"CB\"] + SalaryClustered[\"SS\"] + 
SalaryClustered[\"FS\"] + SalaryClustered[\"S\"]\nSalaryClustered[\"Special Teams\"] = SalaryClustered[\"K\"] + SalaryClustered[\"P\"] + SalaryClustered[\"LS\"]", "_____no_output_____" ], [ "SalaryClustered.groupby([\"cluster\"]).mean()", "_____no_output_____" ], [ "cluster0, cluster1, cluster2, cluster3, cluster4, cluster5 = utils.break_clusters(SalaryClustered)", "_____no_output_____" ], [ "utils.plot(cluster0, cluster1, cluster2, cluster3, cluster4, cluster5, \"Offense\", \"Defense\", \"% Of Cap Spent on Offense\", \"% Of Cap Spent on Defense\", \"Cluster 0\", \"Cluster 1\", \"Cluster 2\", \"Cluster 3\", \"Cluster 4\", \"Cluster 5\")", "_____no_output_____" ] ], [ [ "# Adding win % and stats to check spending value", "_____no_output_____" ] ], [ [ "WSalaryClustered = (SalaryClustered.merge(Wins, how='inner', on=[\"YEAR\",\"TEAM\"]))\nSWSalaryClustered = (WSalaryClustered.merge(Stats, how='inner', on=[\"YEAR\",\"TEAM\"]))", "_____no_output_____" ], [ "SWSalaryClustered.groupby([\"cluster\"]).mean()", "_____no_output_____" ], [ "cluster0, cluster1, cluster2, cluster3, cluster4, cluster5 = utils.break_clusters(SWSalaryClustered)", "_____no_output_____" ], [ "utils.plot(cluster0, cluster1, cluster2, cluster3, cluster4, cluster5, \"Yds_x\", \"Offense\", \"Offensive Yards Per Game\", \"% Of Cap Spent on Offense\", \"Cluster 0\", \"Cluster 1\", \"Cluster 2\", \"Cluster 3\", \"Cluster 4\", \"Cluster 5\")", "_____no_output_____" ], [ "utils.plot(cluster0, cluster1, cluster2, cluster3, cluster4, cluster5, \"Yds_y\", \"Defense\", \"Deffensive Yards Per Game\", \"% Of Cap Spent on Defense\", \"Cluster 0\", \"Cluster 1\", \"Cluster 2\", \"Cluster 3\", \"Cluster 4\", \"Cluster 5\")", "_____no_output_____" ], [ "utils.plot(cluster0, cluster1, cluster2, cluster3, cluster4, cluster5, \"Yds.1_x\", \"QB\", \"Offensive Passing Yards Per Game\", \"% Of Cap Spent on QB\", \"Cluster 0\", \"Cluster 1\", \"Cluster 2\", \"Cluster 3\", \"Cluster 4\", \"Cluster 5\")", "_____no_output_____" ], [ "utils.plot(cluster0, cluster1, cluster2, cluster3, cluster4, cluster5, \"Yds.1_x\", \"WR\", \"Offensive Passing Yards Per Game\", \"% Of Cap Spent on WR\", \"Cluster 0\", \"Cluster 1\", \"Cluster 2\", \"Cluster 3\", \"Cluster 4\", \"Cluster 5\")", "_____no_output_____" ], [ "utils.plot(cluster0, cluster1, cluster2, cluster3, cluster4, cluster5, \"Yds.2_x\", \"RB\", \"Offensive Rushing Yards Per Game\", \"% Of Cap Spent on RB\", \"Cluster 0\", \"Cluster 1\", \"Cluster 2\", \"Cluster 3\", \"Cluster 4\", \"Cluster 5\")", "_____no_output_____" ], [ "utils.plot(cluster0, cluster1, cluster2, cluster3, cluster4, cluster5, \"Yds.1_y\", \"CB\", \"Defensive Passing Yards Per Game\", \"% Of Cap Spent on CB\", \"Cluster 0\", \"Cluster 1\", \"Cluster 2\", \"Cluster 3\", \"Cluster 4\", \"Cluster 5\")", "_____no_output_____" ], [ "utils.plot(cluster0, cluster1, cluster2, cluster3, cluster4, cluster5, \"Offense\", \"W%\", \"% Of Cap Spent on Offense\", \"Win Percentage\", \"Cluster 0\", \"Cluster 1\", \"Cluster 2\", \"Cluster 3\", \"Cluster 4\", \"Cluster 5\")", "_____no_output_____" ], [ "utils.plot(cluster0, cluster1, cluster2, cluster3, cluster4, cluster5, \"Defense\", \"W%\", \"% Of Cap Spent on Defense\", \"Win Percentage\", \"Cluster 0\", \"Cluster 1\", \"Cluster 2\", \"Cluster 3\", \"Cluster 4\", \"Cluster 5\")", "_____no_output_____" ], [ "utils.plot(cluster0, cluster1, cluster2, cluster3, cluster4, cluster5, \"QB\", \"W%\", \"% Of Cap Spent on QB\", \"Win Percentage\", \"Cluster 0\", \"Cluster 1\", \"Cluster 2\", \"Cluster 3\", 
\"Cluster 4\", \"Cluster 5\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c530789bd1083b09cc8271ecabb59baef9aa834f
915,674
ipynb
Jupyter Notebook
Smit/Sem V/CS/Set_3.ipynb
Vishu26/Academics_Projects
d390a90b2740b6bd85381ca3a5292c42c29196a1
[ "MIT" ]
3
2021-11-08T03:54:46.000Z
2021-11-08T04:31:52.000Z
Smit/Sem V/CS/Set_3.ipynb
Vishu26/Academics_Projects
d390a90b2740b6bd85381ca3a5292c42c29196a1
[ "MIT" ]
1
2020-02-08T20:16:17.000Z
2020-02-08T20:24:31.000Z
Smit/Sem V/CS/Set_3.ipynb
Vishu26/Academics_Projects
d390a90b2740b6bd85381ca3a5292c42c29196a1
[ "MIT" ]
2
2022-01-28T05:28:33.000Z
2022-01-28T05:28:54.000Z
232.936657
48,406
0.85613
[ [ [ "[View in Colaboratory](https://colab.research.google.com/github/nishi1612/SC374-Computational-and-Numerical-Methods/blob/master/Set_3.ipynb)", "_____no_output_____" ], [ "Set 3\n---", "_____no_output_____" ], [ "**Finding roots of polynomial by bisection method**", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nfrom google.colab import files", "_____no_output_____" ], [ "def iterations(n, arr , i):\n plt.plot(range(n),arr)\n plt.xlabel('No. of iterations')\n plt.ylabel('Value of c')\n plt.grid(True)\n plt.savefig(\"Iterations\" + str(i) + \".png\")\n files.download(\"Iterations\" + str(i) + \".png\")\n plt.show()", "_____no_output_____" ], [ "def graph(i):\n plt.xlabel('x')\n plt.ylabel('y')\n plt.grid(True)\n plt.legend(loc='upper right')\n plt.savefig(\"Graph\" + str(i) + \".png\")\n files.download(\"Graph\" + str(i) + \".png\")\n plt.show()", "_____no_output_____" ], [ "def bissection( a,b,epsilon,k):\n table = pd.DataFrame(columns=['a','b','c','b-c','f(a)*f(c)','Assign'])\n c = (a+b)/2;\n dist = b-c;\n i = 0\n arr = []\n while(dist>epsilon):\n ans_a = func(a,k);\n ans_b = func(b,k);\n ans_c = func(c,k);\n ans = \"\"\n if(ans_a*ans_c < 0):\n b=c;\n ans = \"b=c\"\n else:\n a=c;\n ans = \"a=c\";\n table.loc[i] = [a,b,c,dist,ans_a*ans_c,ans]\n arr.append(c)\n i = i+1\n c = (a+b) / 2\n dist = b-c \n return (a+b)/2 ,i , arr , table;", "_____no_output_____" ], [ "def func(x,k):\n if k==1:\n return x**6 - x - 1;\n elif k==2:\n return x**3 - x**2 - x - 1;\n elif k==3:\n return x - 1 - 0.3*math.cos(x);\n elif k==4:\n return 0.5 + math.sin(x) - math.cos(x);\n elif k==5:\n return x - math.e**(-x);\n elif k==6:\n return math.e**(-x) - math.sin(x);\n elif k==7:\n return x**3 - 2*x - 2;\n elif k==8:\n return x**4 - x - 1;\n elif k==9:\n return math.e**(x) - x - 2;\n elif k==10:\n return 1- x + math.sin(x);\n elif k==11:\n return x - math.tan(x);", "_____no_output_____" ], [ "x = np.arange(-2,3,0.001)\nplt.plot(x,x**6,label='$x^6$')\nplt.plot(x,x+1,label=\"x+1\")\ngraph(1)\nplt.plot(x**6-x-1,label='$x^6$ - x - 1')\ngraph(1)", "_____no_output_____" ], [ "a , n , arr , table = bissection(1,2,0.001,1)\niterations(n,arr,1)\nprint(str(a) + \"\\n\" + str(func(a,1)))\ntable", "_____no_output_____" ], [ "b , n , arr , table = bissection(-1,0,0.001,1)\niterations(n,arr,1)\nprint(str(b) + \"\\n\" + str(func(b,1)))\ntable", "_____no_output_____" ], [ "x = np.arange(-2,3,0.001)\nplt.plot(x,x**3,label='$x^3$')\nplt.plot(x,x**2 + x + 1,label='$x^2 + x + 1$')\ngraph(2)\nplt.plot(x**3 - (x**2 + x + 1),label='$x^3 - x^2 - x - 1$')\ngraph(2)", "_____no_output_____" ], [ "a , n , arr, table = bissection(1,2,0.0001,2)\niterations(n,arr,2)\nprint(str(a) + \"\\n\" + str(func(a,2)))\ntable", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "x = np.arange(-3,5,0.001)\nplt.plot(x,x-1,label='$x-1$')\nplt.plot(x,0.3*np.cos(x),label='$0.3cos(x)$')\ngraph(3)\nplt.plot(x,x-1-0.3*np.cos(x) , label='$x - 1 - 0.3cos(x)$')\ngraph(3)", "_____no_output_____" ], [ "a , n , arr , table = bissection(0,2,0.0001,3)\niterations(n,arr,3)\nprint(str(a) + \"\\n\" + str(func(a,3)))\ntable", "_____no_output_____" ], [ "x = np.arange(-10,10,0.001)\nplt.plot(x,0.5 + np.sin(x),label='$0.5 + sin(x)$')\nplt.plot(x,np.cos(x),label='$cos(x)$')\ngraph(4)\nplt.plot(x,0.5 + np.sin(x) - np.cos(x),label='$0.5 + sin(x) - cos(x)$')\ngraph(4)", "_____no_output_____" ], [ "a , n , arr , table = bissection(0,2,0.0001,4)\niterations(n,arr,4)\nprint(str(a) + \"\\n\" + 
str(func(a,4)))\ntable", "_____no_output_____" ], [ "x = np.arange(-0,5,0.001)\nplt.plot(x,x,label='$x$')\nplt.plot(x,np.e**(-x),label='$e^{-x}$')\ngraph(5)\nplt.plot(x,x - np.e**(-x),label='$x - e^{-x}$')\ngraph(5)", "_____no_output_____" ], [ "a , n , arr , table = bissection(0,1,0.0001,5)\niterations(n,arr,5)\nprint(str(a) + \"\\n\" + str(func(a,5)))\ntable", "_____no_output_____" ], [ "x = np.arange(0,5,0.001)\nplt.plot(x,np.sin(x),label='$sin(x)$')\nplt.plot(x,np.e**(-x),label='$e^{-x}$')\ngraph(6)\nplt.plot(x,np.sin(x) - np.e**(-x),label='$sin(x) - e^{-x}$')\ngraph(6)", "_____no_output_____" ], [ "a , n , arr , table = bissection(0,1,0.0001,6)\niterations(n,arr,6)\nprint(str(a) + \"\\n\" + str(func(a,6)))\ntable", "_____no_output_____" ], [ "a , n , arr , table = bissection(3,4,0.0001,6)\niterations(n,arr,6)\nprint(str(a) + \"\\n\" + str(func(a,6)))\ntable", "_____no_output_____" ], [ "x = np.arange(-2,4,0.001)\nplt.plot(x,x**3,label='$x^3$')\nplt.plot(x,2*x+2,label='$2x + 2$')\ngraph(7)\nplt.plot(x,x**3 - 2*x - 2,label='$x^3 - 2x - 2$')\ngraph(7)", "_____no_output_____" ], [ "a , n , arr , table = bissection(1,2,0.0001,7)\niterations(n,arr,7)\nprint(str(a) + \"\\n\" + str(func(a,7)))\ntable", "_____no_output_____" ], [ "x = np.arange(-2,4,0.001)\nplt.plot(x,x**4,label='$x^4$')\nplt.plot(x,x+1,label='$x+1$')\ngraph(8)\nplt.plot(x,x**4 - x - 1,label='$x^4 - x - 1$')\ngraph(8)", "_____no_output_____" ], [ "a , n , arr , table = bissection(-1,0,0.0001,8)\niterations(n,arr,8)\nprint(str(a) + \"\\n\" + str(func(a,8)))\ntable", "_____no_output_____" ], [ "a , n , arr , table = bissection(1,2,0.0001,8)\niterations(n,arr,8)\nprint(str(a) + \"\\n\" + str(func(a,8)))\ntable", "_____no_output_____" ], [ "x = np.arange(-5,4,0.001)\nplt.plot(x,np.e**(x),label='$e^x$')\nplt.plot(x,x+2,label='$x+2$')\ngraph(9)\nplt.plot(x,np.e**(x) - x - 2,label='$e^2 - x - 2$')\ngraph(9)", "_____no_output_____" ], [ "a , n , arr , table = bissection(1,2,0.0001,9)\niterations(n,arr,9) \nprint(str(a) + \"\\n\" + str(func(a,9)))\ntable", "_____no_output_____" ], [ "x = np.arange(-5,4,0.001)\nplt.plot(x,-np.sin(x),label='$-sin(x)$')\nplt.plot(x,1-x,label='$1 - x$')\ngraph(10)\nplt.plot(x,-np.sin(x) - 1 + x,label='$-sin(x) - 1 + x$')\ngraph(10)", "_____no_output_____" ], [ "a , n , arr , table = bissection(0,2,0.0001,10)\niterations(n,arr,10)\nprint(str(a) + \"\\n\" + str(func(a,10)))\ntable", "_____no_output_____" ], [ "x = np.arange(-10,10,.001)\nplt.plot(np.tan(x),label='$tan(x)$')\nplt.plot(x,label='$x$')\ngraph(11)\nplt.plot(np.tan(x) - x,label='$x - tan(x)$')\ngraph(11)", "_____no_output_____" ], [ "a , n , arr , table = bissection(4,5,0.0001,11)\niterations(n,arr,11)\nprint(str(a) + \"\\n\" + str(func(a,11)))\ntable", "_____no_output_____" ], [ "a , n , arr , table = bissection(80,120,0.0001,11)\niterations(n,arr,11) \nprint(str(a) + \"\\n\" + str(func(a,11)))\ntable", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c5307a0017935920e68811f72fc6683722e38ed2
100,907
ipynb
Jupyter Notebook
exercise_07/1.pytorch.ipynb
stanley-chang/I2DL
78740460e1f52ce7643358fc548281f1bbe73a42
[ "RSA-MD" ]
null
null
null
exercise_07/1.pytorch.ipynb
stanley-chang/I2DL
78740460e1f52ce7643358fc548281f1bbe73a42
[ "RSA-MD" ]
null
null
null
exercise_07/1.pytorch.ipynb
stanley-chang/I2DL
78740460e1f52ce7643358fc548281f1bbe73a42
[ "RSA-MD" ]
null
null
null
67.586738
28,968
0.778509
[ [ [ "# PyTorch Introduction\n\nThis is an introduction of PyTorch. It’s a Python-based scientific computing package targeted at two sets of audiences:\n\n- A replacement for NumPy to use the power of GPUs;\n\n- a deep learning research platform that provides maximum flexibility and speed.\n - [`torch.Tensor`](https://pytorch.org/docs/stable/tensors.html) is the central class of PyTorch.\n\n - Central to all neural networks in PyTorch is the [`autograd`](https://pytorch.org/docs/stable/autograd.html)\n package. It provides automatic differentiation for all\n operations on Tensors. If we set the attribute `.requires_grad` of `torch.Tensor` as `True`, it starts to\n track all operations on it. When finishing computation, we can call `.backward()` and have all the gradients\n computed automatically. The gradient for this tensor will be accumulated into `.grad` attribute.\n\n\n## Goals of this tutorial\n\n- Understanding PyTorch's Tensor library and neural networks at a high level;\n\n- Training a small network with PyTorch;\n\n\n## Preparation\n\n- Install [PyTorch](https://pytorch.org/) and [torchvision](https://github.com/pytorch/vision) (CPU version); (**If you want to install a cuda version, remember to change the type of the following cell into markdown**)\n\n\n", "_____no_output_____" ] ], [ [ "# Linux and probably Windows, remove the \"> /dev/null\" if you want to see the output\n# !pip install torch==1.4.0+cpu torchvision==0.5.0+cpu -f https://download.pytorch.org/whl/torch_stable.html > /dev/null\n# Mac\n!pip install torch==1.4.0 torchvision==0.5.0 > /dev/null", "\u001b[33mWARNING: You are using pip version 20.0.2; however, version 20.1.1 is available.\r\nYou should consider upgrading via the '/Users/Stanley/anaconda3/bin/python -m pip install --upgrade pip' command.\u001b[0m\r\n" ] ], [ [ "- <div class=\"alert alert-block alert-info\"><b>(Optional)</b> You can also install a\n<a href=\"https://developer.nvidia.com/cuda-downloads\">Cuda</a>\nversion if an Nvidia GPU and Cuda setup is installed on your machine, e.g.</div>\n\n```python\n# CUDA 10.0\npip install torch==1.4.0+cu100 torchvision==0.5.0+cu100 -f https://download.pytorch.org/whl/torch_stable.html\n```\n- <div class=\"alert alert-block alert-danger\">Make sure you've installed the <b>same version of PyTorch and\n torchvision</b>. If you install your own version, there might be some issues.</div>", "_____no_output_____" ] ], [ [ "import torch\nimport torchvision\nprint(f\"Torch version: {torch.__version__}\\nTorchvision version: {torchvision.__version__}\\n\")\nif not torch.__version__.startswith(\"1.4.0\"):\n print(\"you are using an another version of PyTorch. We expect PyTorch 1.4.0. You can continue with your version but it\"\n \" might cause some issues\")\nif not torchvision.__version__.startswith(\"0.5.0\"):\n print(\"you are using an another version of torchvision. We expect torchvision 0.5.0. You can continue with your version but it\"\n \" might cause some issues\")", "Torch version: 1.4.0\nTorchvision version: 0.5.0\n\n" ] ], [ [ "## 1. 
Getting Started\n\nIn this session you will learn the basic element Tensor and some simple oprations of PyTorch.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport torchvision.transforms as transforms\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\nimport os\nimport pandas as pd\npd.options.mode.chained_assignment = None # default='warn'\n\n%load_ext autoreload\n%autoreload 2\n%matplotlib inline", "_____no_output_____" ] ], [ [ "### 1.1 Tensors\n\nTensors are similar to NumPy’s ndarrays, with the addition being that Tensors can also be used on a GPU to accelerate\ncomputing.", "_____no_output_____" ] ], [ [ "# Construct a (2,3) NumPy array and a (2,3) tensor directly from data\n# [[1 2 3]\n# [4 5 6]]\na_np = np.array([[1,2,3],[5,6,7]]) #NumPy array\na_ts = torch.tensor([[1,2,3],[4,5,6]]) # Tensor\nprint(\"a_np:\\n {},\\n Shape: {}\".format(type(a_np), a_np.shape))\n# print(a_np)\nprint(\"a_ts:\\n {},\\n Shape: {}\".format(type(a_ts), a_ts.shape) )\nprint(a_ts)", "a_np:\n <class 'numpy.ndarray'>,\n Shape: (2, 3)\na_ts:\n <class 'torch.Tensor'>,\n Shape: torch.Size([2, 3])\ntensor([[1, 2, 3],\n [4, 5, 6]])\n" ] ], [ [ "### 1.2 Conversion btw. NumPy ndarray and Tensor\n\nThe conversion between NumPy ndarray and PyTorh tensor is quite easy.\n", "_____no_output_____" ] ], [ [ "# Conversion\nm_np = np.array([1, 2, 3])\nn_ts = torch.from_numpy(m_np) #Convert a numpy array to a Tensor\n\nv_np = n_ts.numpy() #Tensor to numpy\nv_np[1] = -1 #Numpy and Tensor share the same memory\nassert(m_np[1] == v_np[1]) #Change Numpy will also change the Tensor", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-block alert-info\"><b>Hint:</b> During the conversion, both ndarray and Tensor share the same memory storage. 
Change value from either side will\naffect the other.</div>\n\n### 1.3 Operations\n\n#### 1.3.1 Indexing\n\nWe can use the NumPy indexing in Tensors:", "_____no_output_____" ] ], [ [ "a_ts", "_____no_output_____" ], [ "# Let us take the first two columns from the original array and save it in a new one\nb = a_ts[:2, :2] #Use numpy type indexing\n#b.shape\nb[:, 0] = 0 #For assignment\nprint(b)", "tensor([[0, 2],\n [0, 5]])\n" ], [ "# Select elements which satisfy a condition\n# Using numpy array makes such a selection trivial\nmask = a_ts > 1\nnew_array = a_ts[mask]\nprint(new_array)", "tensor([2, 3, 5, 6])\n" ], [ "# Do the same thing in a single step\nc = a_ts[a_ts>1]\nprint(c == new_array) #Why assert doesn't work here\n##assert np.all(new_array == c) # np.all() to indicate that all the values need to match", "tensor([True, True, True, True])\n" ] ], [ [ "#### 1.3.2 Mathematical operations", "_____no_output_____" ] ], [ [ "torch.empty(2, 2)", "_____no_output_____" ], [ "# Mathematical operations\nx = torch.tensor([[1,2],[3,4]])\ny = torch.tensor([[5,6],[7,8]])\n\n# Elementwise Addition\n# [[ 6.0 8.0]\n# [10.0 12.0]]\n#Addition: syntax 1\nprint(\"x + y: {}\".format(x + y))\n#Addition: syntax 2\nprint(\"x + y: {}\".format(torch.add(x, y)))\n#Addition: syntax 3\nresult_add = torch.empty(2, 2)\ntorch.add(x, y, out=result_add)\nprint(\"x + y: {}\".format(result_add))\n\n# Elementwise Subtraction\n# [[-4.0 -4.0]\n# [-4.0 -4.0]]\n# Subtraction: syntax 1\nprint(\"x - y: {}\".format(x - y))\n# Subtraction: syntax 2\nprint(\"x - y: {}\".format(torch.sub(x, y)))\n# Subtraction: syntax 3\nresult_sub = torch.empty(2, 2)\ntorch.sub(x, y, out=result_sub)\nprint(\"x - y: {}\".format(result_sub))\n\n# Elementwise Multiplication\n# [[ 5.0 12.0]\n# [21.0 32.0]]\n# Multiplication: syntax 1\nprint(\"x * y: {}\".format(x * y))\n# Multiplication: syntax 2\nprint(\"x * y: {}\".format(torch.mul(x, y)))\n# Multiplication: syntax 3\nresult_mul = torch.empty(2, 2)\ntorch.mul(x, y, out=result_mul)\nprint(\"x * y: {}\".format(result_mul))\n\n\n", "x + y: tensor([[ 6, 8],\n [10, 12]])\nx + y: tensor([[ 6, 8],\n [10, 12]])\nx + y: tensor([[ 6., 8.],\n [10., 12.]])\nx - y: tensor([[-4, -4],\n [-4, -4]])\nx - y: tensor([[-4, -4],\n [-4, -4]])\nx - y: tensor([[-4., -4.],\n [-4., -4.]])\nx * y: tensor([[ 5, 12],\n [21, 32]])\nx * y: tensor([[ 5, 12],\n [21, 32]])\nx * y: tensor([[ 5., 12.],\n [21., 32.]])\n" ] ], [ [ "When dividing two ints in NumPy, the result is always a **float**, e.g.", "_____no_output_____" ] ], [ [ "x_np = np.array([[1,2],[3,4]])\ny_np = np.array([[5,6],[7,8]])\nprint(x_np / y_np)", "[[0.2 0.33333333]\n [0.42857143 0.5 ]]\n" ] ], [ [ "\n\n**However, in PyTorch 1.4.0 `torch.div` calculates floor division if both operands have integer types**;\n If you want **true division** for integers, pleases convert the integers into floats first or specify the output as\n `torch.div(a, b, out=c)`.\n<div class=\"alert alert-block alert-danger\">In PyTorch 1.5.0 you can use <b>true_divide</b> or <b>floor_divide</b>\n to calculate true division or floor division. And in future release div will perform true division as in Python 3. 
</div>\n", "_____no_output_____" ] ], [ [ "# Elementwise Division\n# Floor Division: syntax 1\nprint(\"x // y: {}\".format(x / y))\n# Floor Division: syntax 2\nprint(\"x // y: {}\".format(torch.div(x, y)))\n# True Division: syntax 1\nresult_true_div = torch.empty(2, 2)\ntorch.div(x, y, out=result_true_div)\nprint(\"x / y: {}\".format(result_true_div))", "x // y: tensor([[0, 0],\n [0, 0]])\nx // y: tensor([[0, 0],\n [0, 0]])\nx / y: tensor([[0.2000, 0.3333],\n [0.4286, 0.5000]])\n" ] ], [ [ "### 1.4 Devices\n\nWhen training a neural network, make sure that all the tensors are on the same device. Tensors can be moved onto any device using `.to` method.", "_____no_output_____" ] ], [ [ "# We will use ``torch.device`` objects to move tensors in and out of GPU\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(device)\n\nprint(f\"Original device: {x.device}\") # \"cpu\", integer\n\ntensor = x.to(device)\nprint(f\"Current device: {tensor.device}\") #\"cpu\" or \"cuda\", double", "cpu\nOriginal device: cpu\nCurrent device: cpu\n" ] ], [ [ "So `x` has been moved onto cuda for those who have a GPU; otherwise it's still on the CPU.\n\n<div class=\"alert alert-block alert-info\"><b>Tip:</b> Include the <b>.to(device)</b> calls for every project such that\nyou can easily port it to a GPU version.</div>", "_____no_output_____" ], [ "## 2. Training a classifier with PyTorch\n\nIn this session, you'll have an overview about how we could use PyTorch to load data, define neural networks, compute\nloss and make updates to the weights of the network.\n\n\nWe will do the following steps in order:\n\na) Dataloading in Pytorch compared to our previous datasets\n\nb) Define a two-layer network\n\nc) Define a loss function and optimizer\n\nd) Train the network\n\ne) Test the network\n\n### 2.1 Datasets and Loading\n\nThe general procedure of dataloading is:\n\na) Extract: Get the data from the source\n\nb) Transform: Put our data into suitable form (e.g. 
tensor form)\n\nc) Load: Put our data into an object to make it easily accessible\n\n#### 2.1.1 House price\n\nWe'll use our dataloader and the dataloader of PyTorch to load the house price dataset separately.\n\nFirst, let's initialize our csv dataset from exercise 3:", "_____no_output_____" ] ], [ [ "from exercise_code.data.csv_dataset import CSVDataset, get_exercise5_transform\nfrom exercise_code.data.dataloader import DataLoader as our_DataLoader\n\n# dataloading and preprocessing steps as in ex04 2_logistic_regression.ipynb\ntarget_column = 'SalePrice'\n\ni2dl_exercises_path = os.path.dirname(os.path.abspath(os.getcwd()))\nroot_path = os.path.join(i2dl_exercises_path, \"datasets\", 'housing')\nhousing_file_path = os.path.join(root_path, \"housing_train.csv\")\ndownload_url = 'https://cdn3.vision.in.tum.de/~dl4cv/housing_train.zip'\n\n# Set up the transform to get two prepared columns\nselect_two_columns_transform = get_exercise5_transform()\n\n# Set up the dataset\nour_csv_dataset = CSVDataset(target_column=target_column, root=root_path, download_url=download_url, mode=\"train\",\n transform=select_two_columns_transform)", "_____no_output_____" ] ], [ [ "Now we can set up our dataloader similar to Exercise 5", "_____no_output_____" ] ], [ [ "# Set up our old dataloader\nbatch_size = 4\nour_dataloader = our_DataLoader(our_csv_dataset, batch_size=batch_size)\n\nfor i, item in enumerate(our_dataloader):\n print('Starting item {}'.format(i))\n print('item contains')\n for key in item:\n print(key)\n print(type(item[key]))\n print(item[key].shape)\n \n if i+1 >= 1:\n break", "Starting item 0\nitem contains\nfeatures\n<class 'numpy.ndarray'>\n(4, 2)\ntarget\n<class 'numpy.ndarray'>\n(4, 1)\n" ] ], [ [ "In pyTorch we can directly use a [`Dataloader` class](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader)\nand simply initalize it. And it also provides more parameters than ours, such as easy multiprocessing using `num_workers`. You can refer to the link\nto learn those additional supports.", "_____no_output_____" ] ], [ [ "from torch.utils.data import DataLoader\n\npytorch_dataloader = DataLoader(our_csv_dataset, batch_size=batch_size)\n\n# We can use the exact same way to iterate over samples\nfor i, item in enumerate(pytorch_dataloader):\n print('Starting item {}'.format(i))\n print('item contains')\n for key in item:\n print(key)\n print(type(item[key]))\n print(item[key].shape)\n \n if i+1 >= 1:\n break", "Starting item 0\nitem contains\nfeatures\n<class 'torch.Tensor'>\ntorch.Size([4, 2])\ntarget\n<class 'torch.Tensor'>\ntorch.Size([4, 1])\n" ] ], [ [ "<div class=\"alert alert-block alert-info\">As you can see, both dataloaders load the data with batch_size 4 and the data contains 2 features and 1 target. The only <b>difference</b> here is that the Dataloader of PyTorch will automatically transform the dataset into tensor format.</div>\n\n#### 2.1.2 Torchvision\n\nSpecifically for vision, there's a package called `torchvision`, that has data loaders for common datasets such\nas Imagenet, FashionMNIST, MNIST, etc. and data transformers for images:\n`torchvision.datasets` and `torch.utils.data.DataLoader`.\n\nThis provides a huge convenience and avoids writing boilerplate code.\n\nFor this tutorial, we will use FashionMNIST dataset. It has 10 classes: 'T-shirt/top', 'Trouser', 'Pullover',\n'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'. The images in FashionMNIST\nare of size $1 \\times 28 \\times 28 $, i.e. 
1-channel color images of $ 28 \\times 28 $ pixels in size.", "_____no_output_____" ] ], [ [ "transforms", "_____no_output_____" ], [ "#Define a transform to convert images to tensor\ntransform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,),(0.5,))]) # mean and std have to be sequences (e.g. tuples),\n # therefore we should add a comma after the values\n\nfashion_mnist_dataset = torchvision.datasets.FashionMNIST(root='../datasets', train=True,\n download=True, transform=transform)\nfashion_mnist_test_dataset = torchvision.datasets.FashionMNIST(root='../datasets', train=False,\n download=True, transform=transform)\n\nfashion_mnist_dataloader = DataLoader(fashion_mnist_dataset, batch_size=8)\nfashion_mnist_test_dataloader = DataLoader(fashion_mnist_test_dataset, batch_size=8)\n\nclasses = ('T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot')", "_____no_output_____" ] ], [ [ "- `transforms.Compose` creates a series of transformation to prepare the dataset.\n\n- `transforms.ToTenser` convert `PIL image` or numpy.ndarray $(H \\times W\\times C)$ in the range [0,255] to a\n`torch.FloatTensor` of shape $(C \\times H \\times W)$ in the range [0.0, 1.0].\n\n- `transforms.Normalize` normalize a tensor image with mean and standard deviation.\n\n- `datasets.FashionMNIST` to download the Fashion MNIST datasets and transform the data.\n`train=True` if we want to get the training set; otherwise set `train=False` to get the\ntest set.\n\n- `torch.utils.data.Dataloader` takes our training data or test data with parameter\n`batch_size` and `shuffle`. `batch_size` defines how many samples per batch to load.\n`shuffle=True` makes the data reshuffled at every epoch.", "_____no_output_____" ] ], [ [ "# We can use the exact same way to iterate over samples\nfor i, item in enumerate(fashion_mnist_dataloader):\n print('Starting item {}'.format(i))\n print('item contains')\n image, label = item\n print(f\"Type of input: {type(image)}\")\n print(f\"Shape of the input: {image.shape}\")\n print(f\"label: {label}\")\n\n if i+1 >= 1:\n break", "Starting item 0\nitem contains\nType of input: <class 'torch.Tensor'>\nShape of the input: torch.Size([8, 1, 28, 28])\nlabel: tensor([9, 0, 0, 3, 0, 2, 7, 2])\n" ] ], [ [ "Since we loaded the data with `batch_size` 8, the shape of the input is (8, 1, 28, 28). 
So before we push it into the affine layer, we need to flatten it with `x = x.view(-1, x.size[0)` (It will be shown later in 2.2)\n\n\nLet's show some of the training images.", "_____no_output_____" ] ], [ [ "def imshow(img):\n img = img / 2 + 0.5 # unormalize\n npimg = img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n plt.show()\n\n# get some random training images\ndataiter = iter(fashion_mnist_dataloader)\nimages, labels = dataiter.next()\n# show images\nimshow(torchvision.utils.make_grid(images))\n# print labels\nprint(' '.join('%5s' % classes[labels[j]] for j in range(8)))", "_____no_output_____" ] ], [ [ "### 2.2 Define a Two-Layer Network", "_____no_output_____" ], [ "In exercise_06 we've defined the forward and backward pass for an affine layer and a Sigmoid layer\n(`exercise_code/networks/layer.py`) and completed the implementation of the `ClassificationionNet` class\n(`exercise_code/networks/classifiation_net.py`).", "_____no_output_____" ] ], [ [ "from exercise_code.networks.classification_net import ClassificationNet\nhidden_size = 100\nstd = 1.0\nmodel_ex06 = ClassificationNet(input_size=2, hidden_size=hidden_size, std=std)", "_____no_output_____" ] ], [ [ "Have a look at your lengthy implementation first ;). Now, we can use `torch.nn.Module` to define our network class, e.g.", "_____no_output_____" ] ], [ [ "import torch.nn as nn\n\n\nclass Net(nn.Module):\n def __init__(self, activation=nn.Sigmoid(),\n input_size=1*28*28, hidden_size=100, classes=10):\n super(Net, self).__init__()\n self.input_size = input_size\n\n # Here we initialize our activation and set up our two linear layers\n self.activation = activation\n self.fc1 = nn.Linear(input_size, hidden_size)\n self.fc2 = nn.Linear(hidden_size, classes)\n\n def forward(self, x):\n x = x.view(-1, self.input_size) # flatten\n x = self.fc1(x)\n x = self.activation(x)\n x = self.fc2(x)\n\n return x", "_____no_output_____" ] ], [ [ "Similar to the `ClassificationNet` in exercise_06, here we defined a network with PyTorch.\n\n - PyTorch provides a `nn.Module` that builds neural networks\n\n - `super().__init__` creates a class that inherits attributes and behaviors from another\n class\n\n - `self.fc1` creates an affine layer with `input_size` inputs and `hidden_size` outputs.\n\n - `self.fc2` is similar to `self.fc1`.\n\n - `Forward` pass:\n\n - first flatten the `x` with `x = x.view(-1, self.input_size)`\n\n - 'Sandwich layer' by applying `fc1`, `activation`, `fc2` sequentially.\n \n<div class=\"alert alert-block alert-info\">Thanks to <b>autograd</b> package, we just have to define the <b>forward</b> function. \n And the <b>backward</b> function (where gradients are computed) is automatically defined. We can use any of the Tensor operations in the <b>forward</b> function.</div>\n\n<div class=\"alert alert-block alert-info\"> We can use <b>print</b> to see all difined layers (but it won't show\nthe information of the forward pass).\n\nAnd all the learnable parameters of a model are returned by <b>[model_name].parameters()</b>. 
We also have access to\nthe parameters of different layers by <b>[model_name].[layer_name].parameters()</b> </div>", "_____no_output_____" ] ], [ [ "# create model\nnet = Net()\nnet = net.to('cpu') #always remember to move the network to the device\n\nprint(net)\n\nfor parameter in net.parameters():\n print(parameter.shape)", "Net(\n (activation): Sigmoid()\n (fc1): Linear(in_features=784, out_features=100, bias=True)\n (fc2): Linear(in_features=100, out_features=10, bias=True)\n)\ntorch.Size([100, 784])\ntorch.Size([100])\ntorch.Size([10, 100])\ntorch.Size([10])\n" ] ], [ [ "### 2.3 Define a Loss function and optimizer\n\nLet's use a Classification Cross-Entropy loss and SGD with momentum.\n\nRecall that we've implemented SGD and MSE in exercise_04. Have a look at their implementations in\n `exercise_code/networks/optimizer.py` and `exercise_code/networks/loss.py`", "_____no_output_____" ] ], [ [ "from exercise_code.networks.optimizer import SGD\nfrom exercise_code.networks.loss import MSE, L1", "_____no_output_____" ] ], [ [ "Now we can import the loss function and optimizer directly from `torch.nn` and `torch.optim` respectively, e.g.", "_____no_output_____" ] ], [ [ "import torch.optim as optim\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)", "_____no_output_____" ] ], [ [ "### 2.4 Train the network\n\nThis is when things start to get interesting. We simply have to loop over our data iterator, and feed the inputs to\nthe network and optimize.", "_____no_output_____" ] ], [ [ "device = 'cpu'\ntrain_loss_history = [] # loss\ntrain_acc_history = [] # accuracy\nfor epoch in range(2):\n\n # TRAINING\n running_loss = 0.0\n correct = 0.0\n total = 0\n for i, data in enumerate(fashion_mnist_dataloader, 0):\n # get the inputs; data is a list of [inputs, labels]\n X, y = data\n\n X = X.to(device)\n y = y.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n y_pred = net(X) # input x and predict based on x\n loss = criterion(y_pred, y) # calculate the loss\n loss.backward() # backpropagation, compute gradients\n optimizer.step() # apply gradients\n\n # loss and acc\n running_loss += loss.item()\n _, preds = torch.max(y_pred, 1) #convert output probabilities to predicted class\n correct += preds.eq(y).sum().item()\n total += y.size(0)\n\n # print statistics\n if i % 1000 == 999: # print every 1000 mini-batches\n running_loss /= 1000\n correct /= total\n print(\"[Epoch %d, Iteration %5d] loss: %.3f acc: %.2f %%\" % (epoch+1, i+1, running_loss, 100*correct))\n train_loss_history.append(running_loss)\n train_acc_history.append(correct)\n running_loss = 0.0\n correct = 0.0\n total = 0\n\nprint('FINISH.')", "[Epoch 1, Iteration 1000] loss: 1.528 acc: 56.60 %\n[Epoch 1, Iteration 2000] loss: 0.909 acc: 71.95 %\n[Epoch 1, Iteration 3000] loss: 0.744 acc: 74.49 %\n[Epoch 1, Iteration 4000] loss: 0.659 acc: 76.85 %\n[Epoch 1, Iteration 5000] loss: 0.612 acc: 78.51 %\n[Epoch 1, Iteration 6000] loss: 0.578 acc: 79.40 %\n[Epoch 1, Iteration 7000] loss: 0.556 acc: 80.31 %\n[Epoch 2, Iteration 1000] loss: 0.522 acc: 81.66 %\n[Epoch 2, Iteration 2000] loss: 0.509 acc: 81.99 %\n[Epoch 2, Iteration 3000] loss: 0.510 acc: 81.79 %\n[Epoch 2, Iteration 4000] loss: 0.487 acc: 83.14 %\n[Epoch 2, Iteration 5000] loss: 0.484 acc: 83.49 %\n[Epoch 2, Iteration 6000] loss: 0.472 acc: 83.29 %\n[Epoch 2, Iteration 7000] loss: 0.472 acc: 83.24 %\nFINISH.\n" ] ], [ [ "So the general training pass is as fowllows:\n\n- 
`zero_grad()`: zero the gradient buffers of all parameters and backprops with random gradient\n\n- `y_pred = net(X)`: make a forward pass through the network to getting log probabilities by passing the\nimages to the model.\n\n- `loss = criterion(y_pred, y)`: calculate the loss\n\n- `loss.backward()`: perform a backward pass through the network to calculate the gradients for model parameters.\n\n- `optimizer.step()`: take a step with the optimizer to update the model parameters.\n\nWe keep tracking the training loss and accuracy over time. The following plot shows averages values for train loss and\naccuracy.", "_____no_output_____" ] ], [ [ "plt.plot(train_acc_history)\nplt.plot(train_loss_history)\nplt.title(\"FashionMNIST\")\nplt.xlabel('iteration')\nplt.ylabel('acc/loss')\nplt.legend(['acc', 'loss'])\nplt.show()\n", "_____no_output_____" ] ], [ [ "### 2.5 Test the network on the test data\n\nWe have trained the network for 2 passes over the training dataset. Now we want to check\nthe model by predicting the class label that the neural network outputs, and checking it\nagainst the ground-truth. If the prediction is correct, we add the sample to the list of\ncorrect predictions.\n\nAnd we'll visualize the data to display test images and their labels in the following format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.", "_____no_output_____" ] ], [ [ "#obtain one batch of test images\ndataiter = iter(fashion_mnist_test_dataloader)\nimages, labels = dataiter.__next__()\nimages, labels = images.to(device), labels.to(device)\n\n# get sample outputs\noutputs = net(images)\n# convert output probabilites to predicted class\n_, predicted = torch.max(outputs, 1)\n\n# prep images for display\nimages = images.cpu().numpy()\n\n# plot the images in the batch, along with predicted and true labels\nfig = plt.figure(figsize=(25,4))\nfor idx in range(8):\n ax = fig.add_subplot(2, 8/2, idx+1, xticks=[], yticks=[])\n ax.imshow(np.squeeze(images[idx]), cmap='gray')\n ax.set_title(f\"{classes[predicted[idx]]} ({classes[labels[idx]]})\",\n color=\"green\" if predicted[idx]==labels[idx] else \"red\")", "_____no_output_____" ] ], [ [ "We can also show what are the classes that performed well, and the classes that did not perform well:", "_____no_output_____" ] ], [ [ "class_correct = list(0. for i in range(10))\nclass_total = list(0. for i in range(10))\nwith torch.no_grad():\n for data in fashion_mnist_test_dataloader:\n images, labels = data\n images, labels = images.to(device), labels.to(device)\n outputs = net(images)\n _, predicted = torch.max(outputs, 1)\n c = (predicted == labels).squeeze()\n for i in range(4):\n label = labels[i]\n class_correct[label] += c[i].item()\n class_total[label] += 1\n\n\nfor i in range(10):\n print('Accuracy of %11s: %2d %%' % (classes[i], 100 * class_correct[i] / class_total[i]))", "Accuracy of T-shirt/top: 82 %\nAccuracy of Trouser: 93 %\nAccuracy of Pullover: 77 %\nAccuracy of Dress: 89 %\nAccuracy of Coat: 76 %\nAccuracy of Sandal: 88 %\nAccuracy of Shirt: 39 %\nAccuracy of Sneaker: 91 %\nAccuracy of Bag: 94 %\nAccuracy of Ankle boot: 93 %\n" ] ], [ [ "## Reference\n\n1. [PyTorch Tutorial](https://pytorch.org/tutorials/)\n\n2. [Fashion MNIST dataset training using PyTorch](https://medium.com/@aaysbt/fashion-mnist-data-training-using-pytorch-7f6ad71e96f4)\n\n\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
c530809167a5a374101a6bc38511c647d53494b0
753,168
ipynb
Jupyter Notebook
ML Notebooks/.ipynb_checkpoints/PCA_MNIIST-checkpoint.ipynb
Sahil-Chavan/ML_Playground
cd6b12db7f64e58aae88d7672343aa0406347bb1
[ "Unlicense" ]
null
null
null
ML Notebooks/.ipynb_checkpoints/PCA_MNIIST-checkpoint.ipynb
Sahil-Chavan/ML_Playground
cd6b12db7f64e58aae88d7672343aa0406347bb1
[ "Unlicense" ]
9
2020-09-30T20:07:30.000Z
2021-02-21T18:39:16.000Z
ML Notebooks/.ipynb_checkpoints/PCA_MNIIST-checkpoint.ipynb
Sahil-Chavan/ML_Playground
cd6b12db7f64e58aae88d7672343aa0406347bb1
[ "Unlicense" ]
null
null
null
1,806.158273
371,512
0.959989
[ [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline", "_____no_output_____" ], [ "dfd = pd.read_csv('../datafiles/pca_mnist.csv')\ndf= dfd.head(15000)", "_____no_output_____" ], [ "labels = df.label.copy()\ndf.drop(labels='label',axis = 1,inplace=True)", "C:\\Users\\magic\\anaconda3\\envs\\playground\\lib\\site-packages\\pandas\\core\\frame.py:3990: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n return super().drop(\n" ], [ "from sklearn.preprocessing import StandardScaler\nstd_data = StandardScaler().fit_transform(df)\nprint(std_data.shape,df.shape)", "(15000, 784) (15000, 784)\n" ], [ "cov_data = np.matmul(std_data.T,std_data)\ncov_data.shape", "_____no_output_____" ], [ "from scipy import linalg\ne_val , e_vec = linalg.eigh(cov_data,eigvals=(782,783))\nprint(df.shape,e_vec.shape)", "(15000, 784) (784, 2)\n" ], [ "new_data= np.matmul(std_data,e_vec)\nnew_data.shape", "_____no_output_____" ], [ "ndf = pd.DataFrame(data = new_data,columns=['col1','col2'])\nndf['labels']=labels\nndf", "_____no_output_____" ], [ "ndf['labels'] = ndf['labels'].astype(str)\nndf.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 15000 entries, 0 to 14999\nData columns (total 3 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 col1 15000 non-null float64\n 1 col2 15000 non-null float64\n 2 labels 15000 non-null object \ndtypes: float64(2), object(1)\nmemory usage: 351.7+ KB\n" ], [ "ndf.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 15000 entries, 0 to 14999\nData columns (total 3 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 col1 15000 non-null float64\n 1 col2 15000 non-null float64\n 2 labels 15000 non-null object \ndtypes: float64(2), object(1)\nmemory usage: 351.7+ KB\n" ], [ "sns.set_style(\"whitegrid\")\nfig = plt.figure(figsize=(10,10))\nsns.scatterplot(x='col1',y='col2',hue='labels',data=ndf,legend=\"full\",palette=sns.color_palette(\"bright\", 10))", "_____no_output_____" ] ], [ [ "### Principal Component Analysis using sickit learn", "_____no_output_____" ] ], [ [ "from sklearn import decomposition\npca = decomposition.PCA(n_components=2)\npca_data = pca.fit_transform(std_data)\npca_fin = pd.DataFrame(data = pca_data,columns=['col1','col2'])\npca_fin['labels']=labels.astype(str)\nfig = plt.figure(figsize=(10,10))\nsns.scatterplot(x='col2',y='col1',hue='labels',data=pca_fin,legend=\"full\",palette=sns.color_palette(\"bright\", 10))", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
c53092f0df96e7f6f72fa9965a1a6223b65469b5
21,250
ipynb
Jupyter Notebook
notebooks/pysaprk/gbdt-regression.ipynb
jianzhnie/AutoML-Tools
10ffd2a92458a2d32ecb7b82d5584860e9126801
[ "Apache-2.0" ]
null
null
null
notebooks/pysaprk/gbdt-regression.ipynb
jianzhnie/AutoML-Tools
10ffd2a92458a2d32ecb7b82d5584860e9126801
[ "Apache-2.0" ]
null
null
null
notebooks/pysaprk/gbdt-regression.ipynb
jianzhnie/AutoML-Tools
10ffd2a92458a2d32ecb7b82d5584860e9126801
[ "Apache-2.0" ]
null
null
null
35.535117
510
0.627718
[ [ [ "## Regression with gradient-boosted trees and MLlib pipelines\nThis notebook uses a bike-sharing dataset to illustrate MLlib pipelines and the gradient-boosted trees machine learning algorithm. The challenge is to predict the number of bicycle rentals per hour based on the features available in the dataset such as day of the week, weather, season, and so on. Demand prediction is a common problem across businesses; good predictions allow a business or service to optimize inventory and to match supply and demand to make customers happy and maximize profitability.", "_____no_output_____" ], [ "## Load the dataset\nThe dataset is from the UCI Machine Learning Repository and is provided with Databricks Runtime. The dataset includes information about bicycle rentals from the Capital bikeshare system in 2011 and 2012.\n\nLoad the data using the CSV datasource for Spark, which creates a Spark DataFrame.", "_____no_output_____" ] ], [ [ "from pyspark.sql.types import DoubleType, StringType, StructField, StructType\nfrom pyspark.sql import SparkSession\n\nspark = SparkSession \\\n .builder \\\n .appName(\"Regression \") \\\n .getOrCreate()\n\ndf = spark.read.csv(\"/home/robin/datatsets/bikeSharing/hour.csv\", header=\"true\", inferSchema=\"true\")\n# The following command caches the DataFrame in memory. This improves performance since subsequent calls to the DataFrame can read from memory instead of re-reading the data from disk.\ndf.cache()", "_____no_output_____" ] ], [ [ "## Data description\nThe following columns are included in the dataset:\n\n### Index column:\n\n- instant: record index\n\n### Feature columns:\n\n- dteday: date\n- season: season (1:spring, 2:summer, 3:fall, 4:winter)\n- yr: year (0:2011, 1:2012)\n- mnth: month (1 to 12)\n- hr: hour (0 to 23)\n- holiday: 1 if holiday, 0 otherwise\n- weekday: day of the week (0 to 6)\n- workingday: 0 if weekend or holiday, 1 otherwise\n- weathersit: (1:clear, 2:mist or clouds, 3:light rain or snow, 4:heavy rain or snow)\n- temp: normalized temperature in Celsius\n- atemp: normalized feeling temperature in Celsius\n- hum: normalized humidity\n- windspeed: normalized wind speed\n\n### Label columns:\n\n- casual: count of casual users\n- registered: count of registered users\n- cnt: count of total rental bikes including both casual and registered\n", "_____no_output_____" ], [ "Call display() on a DataFrame to see a sample of the data. The first row shows that 16 people rented bikes between midnight and 1am on January 1, 2011.", "_____no_output_____" ] ], [ [ "display(df)", "_____no_output_____" ], [ "print(\"The dataset has %d rows.\" % df.count())", "The dataset has 17379 rows.\n" ] ], [ [ "## Preprocess data\nThis dataset is well prepared for machine learning algorithms. The numeric input columns (temp, atemp, hum, and windspeed) are normalized, categorial values (season, yr, mnth, hr, holiday, weekday, workingday, weathersit) are converted to indices, and all of the columns except for the date (dteday) are numeric.\n\nThe goal is to predict the count of bike rentals (the cnt column). Reviewing the dataset, you can see that some columns contain duplicate information. For example, the cnt column equals the sum of the casual and registered columns. You should remove the casual and registered columns from the dataset. 
The index column instant is also not useful as a predictor.\n\nYou can also delete the column dteday, as this information is already included in the other date-related columns yr, mnth, and weekday\n", "_____no_output_____" ] ], [ [ "df = df.drop(\"instant\").drop(\"dteday\").drop(\"casual\").drop(\"registered\")\ndisplay(df)", "_____no_output_____" ] ], [ [ "Print the dataset schema to see the type of each column.", "_____no_output_____" ] ], [ [ "df.printSchema()", "root\n |-- season: integer (nullable = true)\n |-- yr: integer (nullable = true)\n |-- mnth: integer (nullable = true)\n |-- hr: integer (nullable = true)\n |-- holiday: integer (nullable = true)\n |-- weekday: integer (nullable = true)\n |-- workingday: integer (nullable = true)\n |-- weathersit: integer (nullable = true)\n |-- temp: double (nullable = true)\n |-- atemp: double (nullable = true)\n |-- hum: double (nullable = true)\n |-- windspeed: double (nullable = true)\n |-- cnt: integer (nullable = true)\n\n" ] ], [ [ "Split data into training and test sets", "_____no_output_____" ], [ "Randomly split data into training and test sets. By doing this, you can train and tune the model using only the training subset, and then evaluate the model's performance on the test set to get a sense of how the model will perform on new data.", "_____no_output_____" ] ], [ [ "# Split the dataset randomly into 70% for training and 30% for testing. Passing a seed for deterministic behavior\ntrain, test = df.randomSplit([0.7, 0.3], seed = 0)\nprint(\"There are %d training examples and %d test examples.\" % (train.count(), test.count()))", "There are 12204 training examples and 5175 test examples.\n" ] ], [ [ "## Visualize the data\nYou can plot the data to explore it visually. The following plot shows the number of bicycle rentals during each hour of the day. As you might expect, rentals are low during the night, and peak at commute hours.\n\nTo create plots, call display() on a DataFrame in Databricks and click the plot icon below the table.\n\nTo create the plot shown, run the command in the following cell. The results appear in a table. From the drop-down menu below the table, select \"Line\". Click Plot Options.... In the dialog, drag hr to the Keys field, and drag cnt to the Values field. Also in the Keys field, click the \"x\" next to <id> to remove it. In the Aggregation drop down, select \"AVG\".", "_____no_output_____" ] ], [ [ "display(train.select(\"hr\", \"cnt\"))", "_____no_output_____" ] ], [ [ "## Train the machine learning pipeline\nNow that you have reviewed the data and prepared it as a DataFrame with numeric values, you're ready to train a model to predict future bike sharing rentals.\n\nMost MLlib algorithms require a single input column containing a vector of features and a single target column. The DataFrame currently has one column for each feature. MLlib provides functions to help you prepare the dataset in the required format.\n\nMLlib pipelines combine multiple steps into a single workflow, making it easier to iterate as you develop the model.\n\nIn this example, you create a pipeline using the following functions:\n\n- VectorAssembler: Assembles the feature columns into a feature vector.\n- VectorIndexer: Identifies columns that should be treated as categorical. This is done heuristically, identifying any column with a small number of distinct values as categorical. 
In this example, the following columns are considered categorical: yr (2 values), season (4 values), holiday (2 values), workingday (2 values), and weathersit (4 values).\n- GBTRegressor: Uses the Gradient-Boosted Trees (GBT) algorithm to learn how to predict rental counts from the feature vectors.\n- CrossValidator: The GBT algorithm has several hyperparameters. This notebook illustrates how to use hyperparameter tuning in Spark. This capability automatically tests a grid of hyperparameters and chooses the best resulting model.\n\nFor more information:\n- VectorAssembler\n- VectorIndexer", "_____no_output_____" ], [ "The first step is to create the VectorAssembler and VectorIndexer steps.", "_____no_output_____" ] ], [ [ "from pyspark.ml.feature import VectorAssembler, VectorIndexer\n \n# Remove the target column from the input feature set.\nfeaturesCols = df.columns\nfeaturesCols.remove('cnt')\n \n# vectorAssembler combines all feature columns into a single feature vector column, \"rawFeatures\".\nvectorAssembler = VectorAssembler(inputCols=featuresCols, outputCol=\"rawFeatures\")\n \n# vectorIndexer identifies categorical features and indexes them, and creates a new column \"features\". \nvectorIndexer = VectorIndexer(inputCol=\"rawFeatures\", outputCol=\"features\", maxCategories=4)\n", "_____no_output_____" ] ], [ [ "### Next, define the model.", "_____no_output_____" ] ], [ [ "from pyspark.ml.regression import GBTRegressor\n \n# The next step is to define the model training stage of the pipeline. \n# The following command defines a GBTRegressor model that takes an input column \"features\" by default and learns to predict the labels in the \"cnt\" column. \ngbt = GBTRegressor(labelCol=\"cnt\")\n", "_____no_output_____" ] ], [ [ "The third step is to wrap the model you just defined in a CrossValidator stage. CrossValidator calls the GBT algorithm with different hyperparameter settings. It trains multiple models and selects the best one, based on minimizing a specified metric. In this example, the metric is root mean squared error (RMSE).", "_____no_output_____" ] ], [ [ "from pyspark.ml.tuning import CrossValidator, ParamGridBuilder\nfrom pyspark.ml.evaluation import RegressionEvaluator\n \n# Define a grid of hyperparameters to test:\n# - maxDepth: maximum depth of each decision tree \n# - maxIter: iterations, or the total number of trees \nparamGrid = ParamGridBuilder()\\\n .addGrid(gbt.maxDepth, [2, 5])\\\n .addGrid(gbt.maxIter, [10, 100])\\\n .build()\n \n# Define an evaluation metric. The CrossValidator compares the true labels with predicted values for each combination of parameters, and calculates this value to determine the best model.\nevaluator = RegressionEvaluator(metricName=\"rmse\", labelCol=gbt.getLabelCol(), predictionCol=gbt.getPredictionCol())\n \n# Declare the CrossValidator, which performs the model tuning.\ncv = CrossValidator(estimator=gbt, evaluator=evaluator, estimatorParamMaps=paramGrid)", "_____no_output_____" ] ], [ [ "### Create the pipeline.", "_____no_output_____" ] ], [ [ "from pyspark.ml import Pipeline\npipeline = Pipeline(stages=[vectorAssembler, vectorIndexer, cv])", "_____no_output_____" ] ], [ [ "### Train the pipeline.\n\nNow that you have set up the workflow, you can train the pipeline with a single call.\nWhen you call fit(), the pipeline runs feature processing, model tuning, and training and returns a fitted pipeline with the best model it found. 
This step takes several minutes.", "_____no_output_____" ] ], [ [ "pipelineModel = pipeline.fit(train)", "_____no_output_____" ] ], [ [ "MLlib will automatically track trials in MLflow. After your tuning fit() call has completed, view the MLflow UI to see logged runs.", "_____no_output_____" ], [ "## Make predictions and evaluate results\nThe final step is to use the fitted model to make predictions on the test dataset and evaluate the model's performance. The model's performance on the test dataset provides an approximation of how it is likely to perform on new data. For example, if you had weather predictions for the next week, you could predict bike rentals expected during the next week.\n\nComputing evaluation metrics is important for understanding the quality of predictions, as well as for comparing models and tuning parameters.", "_____no_output_____" ], [ "The transform() method of the pipeline model applies the full pipeline to the input dataset. The pipeline applies the feature processing steps to the dataset and then uses the fitted GBT model to make predictions. The pipeline returns a DataFrame with a new column predictions.", "_____no_output_____" ] ], [ [ "predictions = pipelineModel.transform(test)", "_____no_output_____" ], [ "display(predictions.select(\"cnt\", \"prediction\", *featuresCols))", "_____no_output_____" ] ], [ [ "A common way to evaluate the performance of a regression model is the calculate the root mean squared error (RMSE). The value is not very informative on its own, but you can use it to compare different models. CrossValidator determines the best model by selecting the one that minimizes RMSE.", "_____no_output_____" ] ], [ [ "rmse = evaluator.evaluate(predictions)\nprint(\"RMSE on our test set: %g\" % rmse)", "RMSE on our test set: 45.7614\n" ] ], [ [ "You can also plot the results, as you did the original dataset. In this case, the hourly count of rentals shows a similar shape.", "_____no_output_____" ] ], [ [ "display(predictions.select(\"hr\", \"prediction\"))", "_____no_output_____" ] ], [ [ "It's also a good idea to examine the residuals, or the difference between the expected result and the predicted value. The residuals should be randomly distributed; if there are any patterns in the residuals, the model may not be capturing something important. In this case, the average residual is about 1, less than 1% of the average value of the cnt column.", "_____no_output_____" ] ], [ [ "import pyspark.sql.functions as F\npredictions_with_residuals = predictions.withColumn(\"residual\", (F.col(\"cnt\") - F.col(\"prediction\")))\ndisplay(predictions_with_residuals.agg({'residual': 'mean'}))", "_____no_output_____" ] ], [ [ "Plot the residuals across the hours of the day to look for any patterns. In this example, there are no obvious correlations.", "_____no_output_____" ] ], [ [ "display(predictions_with_residuals.select(\"hr\", \"residual\"))", "_____no_output_____" ] ], [ [ "## Improving the model\nHere are some suggestions for improving this model:\n\n- The count of rentals is the sum of registered and casual rentals. These two counts may have different behavior, as frequent cyclists and casual cyclists may rent bikes for different reasons. Try training one GBT model for registered and one for casual, and then add their predictions together to get the full prediction.\n- For efficiency, this notebook used only a few hyperparameter settings. You might be able to improve the model by testing more settings. 
A good start is to increase the number of trees by setting maxIter=200; this takes longer to train but might be more accurate.\n\nThis notebook used the dataset features as-is, but you might be able to improve performance with some feature engineering. For example, the weather might have more of an impact on the number of rentals on weekends and holidays than on workdays. You could try creating a new feature by combining those two columns. MLlib provides a suite of feature transformers; find out more in the ML guide.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
c530a0282fdfc6a69ab020ee78d08d64df145758
4,959
ipynb
Jupyter Notebook
funflow-tutorial/notebooks/CCompilation/CCompilation.ipynb
EspenBerget/funflow
a904b6faf3f90d54f3b50d9902c95f6a40bf5be6
[ "MIT" ]
10
2020-11-18T20:59:39.000Z
2021-11-28T07:20:07.000Z
funflow-tutorial/notebooks/CCompilation/CCompilation.ipynb
EspenBerget/funflow
a904b6faf3f90d54f3b50d9902c95f6a40bf5be6
[ "MIT" ]
35
2020-11-18T10:07:29.000Z
2021-07-21T14:53:22.000Z
funflow-tutorial/notebooks/CCompilation/CCompilation.ipynb
tweag/funflow2
95409d5b030070609910914aa3e36c677e60d061
[ "MIT" ]
null
null
null
29.343195
307
0.556362
[ [ [ "# Compiling and running C programs\n\nAs in [the example](https://github.com/tweag/funflow/tree/v1.5.0/funflow-examples/compile-and-run-c-files) in funflow version 1, we can construct a `Flow` which compiles and executes a C program. As in the older versions of this example, we will use the `gcc` Docker image to run our compilation step.", "_____no_output_____" ] ], [ [ ":opt no-lint\n\n{-# LANGUAGE Arrows #-}\n{-# LANGUAGE OverloadedStrings #-}\n{-# LANGUAGE QuasiQuotes #-}\n\n-- Funflow libraries\nimport qualified Data.CAS.ContentStore as CS\nimport Funflow\n ( Flow,\n dockerFlow,\n ioFlow,\n getDirFlow,\n pureFlow,\n putDirFlow,\n runFlow,\n )\nimport qualified Funflow.Tasks.Docker as DE\n\n-- Other libraries\nimport Path (toFilePath, Abs, Dir, Path, File, absdir, parseAbsDir, relfile, reldir, (</>))\nimport System.Directory (getCurrentDirectory)\nimport System.Process (runCommand, ProcessHandle)", "_____no_output_____" ] ], [ [ "Similar to in Funflow version 1.x, inputs to Docker tasks are mounted in from the content store. This means that we need to copy our example c files to the content store before we can compile them:", "_____no_output_____" ] ], [ [ "-- | Helper for getting the absolute path to the src directory\nsrcDir :: () -> IO (Path Abs Dir)\nsrcDir _ = do\n cwd <- getCurrentDirectory\n cwdAbs <- parseAbsDir cwd\n return $ cwdAbs </> [reldir|./src|]\n\n-- | A `Flow` which copies the c sources to the content store\ncopyExampleToStore :: Flow () CS.Item\ncopyExampleToStore = proc _ -> do\n exampleDir <- ioFlow srcDir -< ()\n putDirFlow -< exampleDir", "_____no_output_____" ] ], [ [ "Now we can define a task which compiles the example C files using `gcc`:", "_____no_output_____" ] ], [ [ "config :: DE.DockerTaskConfig\nconfig =\n DE.DockerTaskConfig\n { DE.image = \"gcc:9.3.0\",\n DE.command = \"gcc\",\n DE.args = [ \"/example/double.c\", \"/example/square.c\", \"/example/main.c\"]\n }\n\n-- | Compile our C program and get the path to the output executable\ncompile :: Flow CS.Item CS.Item\ncompile = proc exampleItem -> do\n -- Define a volume for the example directory\n let exampleVolume = DE.VolumeBinding {DE.item = exampleItem, DE.mount = [absdir|/example/|]}\n dockerFlow config -< DE.DockerTaskInput {DE.inputBindings = [exampleVolume], DE.argsVals = mempty}", "_____no_output_____" ] ], [ [ "And finally, we can construct our full Flow graph and execute it!", "_____no_output_____" ] ], [ [ "flow :: Flow Integer ProcessHandle\nflow = proc input -> do\n -- 1. Add the example to the content store\n example <- copyExampleToStore -< ()\n \n -- 2. Compile the C sources and get the path to the new executable\n output <- compile -< example\n outputDir <- getDirFlow -< output\n exe <- pureFlow (\\x -> toFilePath (x </> [relfile|a.out|])) -< outputDir\n \n -- 3. Call the executable\n command <- pureFlow (\\(c, n) -> c <> \" \" <> show n) -< (exe, input)\n ioFlow runCommand -< command", "_____no_output_____" ], [ "-- Our C program defined in `src/main.c` defines a function f(x) = 2*x + x^2\n-- For input 3 this should output 15.\nrunFlow flow 3 :: IO ProcessHandle", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
c530a12acf6378273ed2ec91adcf5c384f686667
2,803
ipynb
Jupyter Notebook
playground.ipynb
naspert/pygsp
361f0258a210193f482c6197ea879765a9041e91
[ "BSD-3-Clause" ]
null
null
null
playground.ipynb
naspert/pygsp
361f0258a210193f482c6197ea879765a9041e91
[ "BSD-3-Clause" ]
1
2018-03-29T09:39:45.000Z
2018-03-29T09:39:45.000Z
playground.ipynb
naspert/pygsp
361f0258a210193f482c6197ea879765a9041e91
[ "BSD-3-Clause" ]
null
null
null
22.604839
260
0.556547
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
c530c42109e089cefeadae679023c80ea1253127
2,368
ipynb
Jupyter Notebook
concepts/datastore/datastore-api.ipynb
luisquintanilla/azureml-examples
024efc5a09a2b648e79ecd4c7e59e4978d81035e
[ "MIT" ]
1
2020-10-15T18:11:33.000Z
2020-10-15T18:11:33.000Z
concepts/datastore/datastore-api.ipynb
luisquintanilla/azureml-examples
024efc5a09a2b648e79ecd4c7e59e4978d81035e
[ "MIT" ]
null
null
null
concepts/datastore/datastore-api.ipynb
luisquintanilla/azureml-examples
024efc5a09a2b648e79ecd4c7e59e4978d81035e
[ "MIT" ]
null
null
null
20.413793
85
0.527449
[ [ [ "# Azure ML Datastore Python SDK\n\ndescription: overview of the AML Datastore Python SDK", "_____no_output_____" ] ], [ [ "from azureml.core import Workspace\n\nws = Workspace.from_config()\nws", "_____no_output_____" ], [ "import git\nfrom pathlib import Path\n\n# get root of git repo\nprefix = Path(git.Repo(\".\", search_parent_directories=True).working_tree_dir)", "_____no_output_____" ], [ "ds = ws.get_default_datastore()\nds", "_____no_output_____" ], [ "from azureml.core import Datastore\n\nname = \"TuringNLR\"\ncontainer_name = \"public\"\naccount_name = \"turingnlr\"\n\n# register a new datastore - use public Turing blob container\nds2 = input_ds = Datastore.register_azure_blob_container(\n ws, name, container_name, account_name\n)\nds2", "_____no_output_____" ], [ "ws.datastores", "_____no_output_____" ], [ "# upload files, then create a Dataset from the datastore and path to use\nds.upload(\n str(prefix.joinpath(\"data\", \"raw\", \"iris\")),\n target_path=\"datasets/iris\",\n show_progress=True,\n)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
c530ec8dd373dc65218a9302990a85afc2d6b3cd
43,251
ipynb
Jupyter Notebook
lectures/data_manipulation/data_manipulation/modulos_pandas/pivot.ipynb
sergio825/mat281_portafolio
09f59aa3a0cc024fa1306aa0821d10bdde34c072
[ "MIT" ]
null
null
null
lectures/data_manipulation/data_manipulation/modulos_pandas/pivot.ipynb
sergio825/mat281_portafolio
09f59aa3a0cc024fa1306aa0821d10bdde34c072
[ "MIT" ]
null
null
null
lectures/data_manipulation/data_manipulation/modulos_pandas/pivot.ipynb
sergio825/mat281_portafolio
09f59aa3a0cc024fa1306aa0821d10bdde34c072
[ "MIT" ]
null
null
null
28.213307
313
0.331276
[ [ [ "# Pivot", "_____no_output_____" ], [ "## Formato Wide y Formato Long\n\nDentro del mundo de los dataframe (o datos tabulares) existen dos formas de presentar la naturaleza de los datos: formato wide y formato long. \n", "_____no_output_____" ], [ "Por ejemplo, el conjunto de datos [Zoo Data Set](http://archive.ics.uci.edu/ml/datasets/zoo) presenta las características de diversos animales, de los cuales presentamos las primeras 5 columnas.\n\n|animal_name|hair|feathers|eggs|milk|\n|-----------|----|--------|----|----|\n|antelope|1|0|0|1|\n|bear|1|0|0|1|\n|buffalo|1|0|0|1|\n|catfish|0|0|1|0|\n\nLa tabla así presentada se encuentra en **wide format**, es decir, donde los valores se extienden a través de las columnas.", "_____no_output_____" ], [ "Sería posible representar el mismo contenido anterior en **long format**, es decir, donde los mismos valores se indicaran a través de las filas:\n\n|animal_name|characteristic|value|\n|-----------|----|--------|\n|antelope|hair |1|\n|antelope|feathers|0|\n|antelope|eggs|0|\n|antelope|milk|1|\n|...|...|...|...|..|\n|catfish|hair |0|\n|catfish|feathers|0|\n|catfish|eggs|1|\n|catfish|milk|0|", "_____no_output_____" ], [ "<img src=\"images/wide_and_long.png\" align=\"center\"/>\n", "_____no_output_____" ], [ "En python existen maneras de pasar del formato **wide** al formato **long** y viceversa.", "_____no_output_____" ], [ "## Pivotear y despivotear tablas", "_____no_output_____" ], [ "### Pivot\n\nEl pivoteo de una tabla corresponde al paso de una tabla desde el formato **long** al formato **wide**. Típicamente esto se realiza para poder comparar los valores que se obtienen para algún registro en particular, o para utilizar algunas herramientas de visualización básica que requieren dicho formato.\n\nPara ejemplificar estos resultados, ocupemos el conjunto de datos **terremotos.csv**, con contiene los registros de terremotos de distintos paises desde el año 2000 al 2011.\n\n<img src=\"./images/logo_terremoto.png\" alt=\"\" align=\"center\" width=\"300\"/>\n", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport os", "_____no_output_____" ], [ "# formato long\ndf = pd.read_csv(os.path.join(\"data\",\"terremotos.csv\"), sep=\",\")\ndf.head()", "_____no_output_____" ] ], [ [ "Por ejemplo, se quiere saber el terremoto de mayor magnitud a nivel de país año. Tenemos dos formas de mostrar la información.", "_____no_output_____" ] ], [ [ "# formato long\ndf.groupby(['Pais','Año']).max()", "_____no_output_____" ], [ "# formato wide\ndf.pivot_table(index=\"Pais\", columns=\"Año\", values=\"Magnitud\", fill_value='', aggfunc=pd.np.max)", "/home/falfaro/.cache/pypoetry/virtualenvs/pymessi-xyyw3p3f-py3.6/lib/python3.6/site-packages/ipykernel_launcher.py:2: FutureWarning: The pandas.np module is deprecated and will be removed from pandas in a future version. Import numpy directly instead\n \n" ] ], [ [ "### Despivotear un tabla\n\nEl despivotear una tabla corresponde al paso de una tabla desde el formato **wide** al formato **long**. \n\nSe reconocen dos situaciones:\n\n1. El valor indicado para la columna es único, y sólo se requiere definir correctamente las columnas.\n2. 
El valor indicado por la columna no es único o requiere un procesamiento adicional, y se requiere una iteración más profunda.\n\nPara ejemplificar esto, crearemos un conjunto de datos con los horarios de los ramos que se tiene que dictar en un determinado día, hora y lugar.", "_____no_output_____" ], [ "\n<img src=\"./images/logo_classroom.png\" alt=\"\" align=\"center\" width=\"400px\"/>\n", "_____no_output_____" ], [ "**a) El valor indicado para la columna es único**", "_____no_output_____" ] ], [ [ "columns = [\"sala\",\"dia\",\"08:00\",\"09:00\",\"10:00\"]\ndata = [\n [\"C201\",\"Lu\", \"mat1\",\"mat1\", \"\"],\n [\"C201\",\"Ma\", \"\",\"\",\"\"],\n [\"C202\",\"Lu\", \"\",\"\",\"\"],\n [\"C202\",\"Ma\", \"mat1\",\"mat1\", \"\"],\n [\"C203\",\"Lu\", \"fis1\",\"fis1\",\"fis1\"],\n [\"C203\",\"Ma\", \"fis1\",\"fis1\",\"fis1\"],\n ]\ndf = pd.DataFrame(data=data, columns=columns)\ndf", "_____no_output_____" ], [ "# Despivotear incorrectamente la tabla\ndf.melt(id_vars=[\"sala\"], var_name=\"hora\", value_name=\"curso\")", "_____no_output_____" ], [ "# Despivotear correctamente la tabla\ndf_melt = df.melt(id_vars=[\"sala\", \"dia\"], var_name=\"hora\", value_name=\"curso\")\ndf_melt[df_melt.curso!=\"\"].sort_values([\"sala\",\"dia\",\"hora\"])", "_____no_output_____" ] ], [ [ "**b) Relaciones no únicas**", "_____no_output_____" ] ], [ [ "columns = [\"sala\",\"curso\",\"Lu\",\"Ma\",\"hora\"]\ndata = [\n [\"C201\",\"mat1\",\"X\",\"\",\"8:00-10:00\"],\n [\"C202\",\"mat1\",\"\",\"X\",\"8:00-10:00\"],\n [\"C203\",\"fis1\",\"X\",\"X\",\"8:00-11:00\"],\n ]\ndf = pd.DataFrame(data=data, columns=columns)\ndf", "_____no_output_____" ] ], [ [ "#### Métodos", "_____no_output_____" ], [ "**método 01: Despivotear manualmente y generar un nuevo dataframe**\n\n* **Ventajas**: Si se puede es una solución directa y rápida.\n* **Desventaja**: requiere programación explícita de la tarea, no es reutilizable.", "_____no_output_____" ] ], [ [ "# Obtener el día lunes\ndf_Lu = df.loc[df.Lu==\"X\", [\"sala\",\"curso\",\"hora\"]]\ndf_Lu[\"dia\"] = \"Lu\"\ndf_Lu", "_____no_output_____" ], [ "# Obtener el día martes\ndf_Ma = df.loc[df.Ma==\"X\", [\"sala\",\"curso\",\"hora\"]]\ndf_Ma[\"dia\"] = \"Ma\"\ndf_Ma", "_____no_output_____" ], [ "# Juntar\npd.concat([df_Lu,df_Ma])", "_____no_output_____" ] ], [ [ "**método 02: Iterar sobre las filas y generar contenido para un nuevo dataframe**\n* **Ventajas**: En general, fácil de codificar.\n* **Desventaja**: puede ser lento, es ineficiente.", "_____no_output_____" ] ], [ [ "my_columns = [\"sala\",\"curso\",\"dia\",\"hora\"]\nmy_data = []\nfor i, df_row in df.iterrows():\n # Procesar cada fila\n if df_row.Lu==\"X\":\n my_row = [df_row.sala, df_row.curso, \"Lu\", df_row.hora]\n my_data.append(my_row)\n if df_row.Ma==\"X\":\n my_row = [df_row.sala, df_row.curso, \"Ma\", df_row.hora]\n my_data.append(my_row)\nnew_df = pd.DataFrame(data=my_data, columns=my_columns)\nnew_df", "_____no_output_____" ] ], [ [ "## Referencia\n\n1. [Reshaping and pivot tables](https://pandas.pydata.org/pandas-docs/stable/user_guide/reshaping.html)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
c531054f75833069d0f01e1db599549b9c643141
555,862
ipynb
Jupyter Notebook
Shah_Baltimore_Denver_Job_Analysis.ipynb
cshah13/workforce-opportunities-baltimore-denver
70f3c3313ea72c9613c4ba7c5183e3ad75d5a1b9
[ "MIT" ]
null
null
null
Shah_Baltimore_Denver_Job_Analysis.ipynb
cshah13/workforce-opportunities-baltimore-denver
70f3c3313ea72c9613c4ba7c5183e3ad75d5a1b9
[ "MIT" ]
null
null
null
Shah_Baltimore_Denver_Job_Analysis.ipynb
cshah13/workforce-opportunities-baltimore-denver
70f3c3313ea72c9613c4ba7c5183e3ad75d5a1b9
[ "MIT" ]
null
null
null
210.873293
207,070
0.819801
[ [ [ "<a href=\"https://colab.research.google.com/github/cshah13/workforce-opportunities-baltimore-denver/blob/main/Shah_Baltimore_Denver_Job_Analysis.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Import Libraries\n", "_____no_output_____" ] ], [ [ "# import libraries\n\n# data analysis\nimport pandas as pd\n\n# data visualization\nimport matplotlib.pyplot as plt\nimport plotly.express as px\n\n# download files to our computer\nfrom google.colab import files ", "_____no_output_____" ] ], [ [ "# Import Data for Baltimore Job Availability\n", "_____no_output_____" ], [ "Imported data from [this github here](https://github.com/cshah13/workforce-opportunities-baltimore-denver)", "_____no_output_____" ] ], [ [ "# import csv of baltimore city job availability data\n\n# save github csv link\njob_data = \"https://raw.githubusercontent.com/cshah13/workforce-opportunities-baltimore-denver/main/Original%20Baltimore%20Job%20Data%20CSV.csv\"\n\n#define our initial dataframe\ndf_job = pd.read_csv(job_data)", "_____no_output_____" ] ], [ [ "# Look at Our Data\n", "_____no_output_____" ] ], [ [ "# preview the first five rows\ndf_job.head()", "_____no_output_____" ], [ "# preview last five rows of the data\ndf_job.tail()", "_____no_output_____" ], [ "# general stats to help us understand the data\ndf_job.describe()", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "# delete the tract column\n\ndel df_job[\"tract\"]", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "df_job.head()", "_____no_output_____" ] ], [ [ "Remove Non-Baltimore Data", "_____no_output_____" ] ], [ [ "df_job['location'].str.contains(\"baltimore\")", "_____no_output_____" ], [ "df_baltimore = df_job[df_job['location'].str.contains(\"Baltimore\")]\n", "_____no_output_____" ], [ "df_baltimore.head()", "_____no_output_____" ], [ "df_baltimore = df_baltimore.rename(columns = {\"location\" : \"location\", \"availability_of_jobs_in_2013\" : \"jobs_per_sq_mile\"})", "_____no_output_____" ], [ "df_baltimore.head()", "_____no_output_____" ], [ "# general stats to help us understand the data\ndf_baltimore.describe()", "_____no_output_____" ] ], [ [ "# Create a Bar Graph for Baltimore Job Availability", "_____no_output_____" ] ], [ [ "# plot average number of jobs available per square mile in a bar graph\ndf_baltimore.plot(x = \"location\", y = \"jobs_per_sq_mile\", kind = \"bar\", figsize = (45,8))", "_____no_output_____" ], [ "# add graph labels\nbmore_jobs_fig = plt.figure()\n\ndf_baltimore.plot(x = \"location\", y = \"jobs_per_sq_mile\", kind = \"bar\", figsize = (45,8), title = \"Average Number of Jobs Available per Square Mile in Baltimore, MD\")\nplt.xlabel(\"Areas in Baltimore\")\nplt.ylabel(\"Average Number of Jobs Available Per Square Mile\")", "_____no_output_____" ], [ "# save our graph\nbmore_jobs_fig.savefig(\"jobs_bmore.png\")", "_____no_output_____" ], [ "#downloading the files from google colab\nfiles.download(\"jobs_bmore.png\")", "_____no_output_____" ], [ "# melt dataframe to work easier with plotly express\ndf_agg_melt = pd.melt(df_baltimore, id_vars = [\"location\"])", "_____no_output_____" ], [ "df_agg_melt.head()", "_____no_output_____" ], [ "# make bar graph in plotly express\nbmore_job_fig = px.bar(df_baltimore, x = 'location', y = 'jobs_per_sq_mile', title=\"Average Number of Jobs Available per Square Mile in Baltimore, MD\", labels = {\"location\": \"Areas in Baltimore\", 
\"jobs_per_sq_mile\": \"Average Number of Jobs Available Per Square Mile\"}, width=1200, height=1000)\nbmore_job_fig.show()\n\n", "_____no_output_____" ], [ "# save an html file \nbmore_job_fig.write_html(\"plotly_bar_bmorejobs.html\")", "_____no_output_____" ], [ "#download from google\nfiles.download(\"plotly_bar_bmorejobs.html\")", "_____no_output_____" ], [ "#remove repeat locations\ndf_avgbmorejob = df_baltimore.groupby(\"location\")[\"jobs_per_sq_mile\"].agg([\"mean\"]).reset_index()", "_____no_output_____" ], [ "df_avgbmorejob.head()", "_____no_output_____" ], [ "#recreate graph\nbmorejob__fig = px.bar(df_avgbmorejob, x = 'location', y = 'mean', title=\"Average Number of Jobs Available per Square Mile in Baltimore, MD\", labels = {\"location\": \"Areas in Baltimore\", \"mean\": \"Average Number of Jobs Available Per Square Mile\"}, width=1200, height=1000,)\nbmorejob__fig.update_layout(barmode='stack', xaxis={'categoryorder':'total descending'})\nbmorejob__fig.show()", "_____no_output_____" ] ], [ [ "Import Data for Denver Job Availability", "_____no_output_____" ], [ "Imported data from [this github here](https://github.com/cshah13/workforce-opportunities-baltimore-denver)", "_____no_output_____" ] ], [ [ "# import csv of denver job availability data\n\n# save github csv link\ndenverjob_data = \"https://raw.githubusercontent.com/cshah13/workforce-opportunities-baltimore-denver/main/Original%20Denver%20Job%20Data%20CSV.csv\"\n\n#define our initial dataframe\ndf_denverjob = pd.read_csv(denverjob_data)", "_____no_output_____" ], [ "# preview the first five rows\ndf_denverjob.head()", "_____no_output_____" ], [ "# preview last five rows of the data\ndf_denverjob.tail()", "_____no_output_____" ], [ "# delete the tract column\n\ndel df_denverjob[\"tract\"]", "_____no_output_____" ], [ "df_denverjob.head()", "_____no_output_____" ], [ "# general stats to help us understand the data\ndf_denverjob.describe()", "_____no_output_____" ] ], [ [ "Remove Non-Denver Data", "_____no_output_____" ] ], [ [ "df_denver = df_denverjob[df_denverjob['location'].str.contains(\"Denver\")]", "_____no_output_____" ], [ "df_denverjob.head()", "_____no_output_____" ], [ "# general stats to help us understand the data\ndf_denver.describe()", "_____no_output_____" ], [ "", "_____no_output_____" ] ], [ [ "# Create a Bar Graph with Denver Job Availability Data", "_____no_output_____" ] ], [ [ "# melt dataframe to work easier with plotly express\ndf_agg_meltt = pd.melt(df_denver, id_vars = [\"location\"])", "_____no_output_____" ], [ "df_agg_meltt.head()", "_____no_output_____" ], [ "# make bar graph in plotly express\nden_job_fig = px.bar(df_denver, x = 'location', y = 'availability_of_jobs_in_2013', title=\"Average Number of Jobs Available per Square Mile in Denver, CO\", labels = {\"location\": \"Areas in Denver\", \"availability_of_jobs_in_2013\": \"Average Number of Jobs Available Per Square Mile\"}, width=1200, height=1200)\nden_job_fig.show()", "_____no_output_____" ], [ "# save an html file \nden_job_fig.write_html(\"plotly_bar_denjobs.html\")", "_____no_output_____" ], [ "#download from google\nfiles.download(\"plotly_bar_denjobs.html\")", "_____no_output_____" ], [ "#remove repeat locations\ndf_avgdenverjob = df_denver.groupby(\"location\")[\"availability_of_jobs_in_2013\"].agg([\"mean\"]).reset_index()", "_____no_output_____" ], [ "df_avgdenverjob.head()", "_____no_output_____" ], [ "# redo bar graph in plotly express\ndennew_fig = px.bar(df_avgdenverjob, x = 'location', y = 'mean', title=\"Average Number 
of Jobs Available per Square Mile in Denver, CO\", labels = {\"location\": \"Areas in Denver\", \"mean\": \"Average Number of Jobs Available Per Square Mile\"}, width=1200, height=1200)\ndennew_fig.update_layout(barmode='stack', xaxis={'categoryorder':'total descending'})\ndennew_fig.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c53105d3fe4c89cda9ec915919e432a34b0dbd82
78,555
ipynb
Jupyter Notebook
sveske/sources/01_BasicTraining/01_basic_training.ipynb
sandraASMD/unibl_radionica
3b36e07fd3835ea9bfc63ed3a0fd25ef620a99d3
[ "BSD-3-Clause" ]
2
2019-09-18T19:21:44.000Z
2019-09-19T00:00:25.000Z
sveske/sources/01_BasicTraining/01_basic_training.ipynb
sandraASMD/unibl_radionica
3b36e07fd3835ea9bfc63ed3a0fd25ef620a99d3
[ "BSD-3-Clause" ]
null
null
null
sveske/sources/01_BasicTraining/01_basic_training.ipynb
sandraASMD/unibl_radionica
3b36e07fd3835ea9bfc63ed3a0fd25ef620a99d3
[ "BSD-3-Clause" ]
34
2019-09-18T14:39:38.000Z
2019-09-20T06:45:07.000Z
19.062121
482
0.447355
[ [ [ "## Basic Training\nUC Berkeley Python Bootcamp", "_____no_output_____" ] ], [ [ "print(\"Hello, world.\")", "Hello, world.\n" ] ], [ [ "# Calculator #\n\n> there are `int` and `float` (but not doubles)", "_____no_output_____" ] ], [ [ "print(2 + 2)", "4\n" ], [ "2 + 2 ", "_____no_output_____" ], [ "print(2.1 + 2)", "4.1\n" ], [ "2.1 + 2 == 4.0999999999999996", "_____no_output_____" ], [ "%run talktools", "_____no_output_____" ] ], [ [ " - Python stores floats as their byte representation so is limited by the same 16-bit precision issues as most other languages\n\n - In doing calculations, unless you specify otherwise, Python will store the results in the smallest-byte representation", "_____no_output_____" ], [ "> 1. Indentation matters!\n> 2. When you mess up, Python is gentle\n> 3. \\# starts a comments (until the end of the line)", "_____no_output_____" ] ], [ [ "print(2 + 2)\n 2 + 2", "_____no_output_____" ], [ "2 # this is a comment and is not printed", "_____no_output_____" ], [ "# this is also a comment", "_____no_output_____" ] ], [ [ " &nbsp;", "_____no_output_____" ], [ "** Calculator **\n\n - In Python 3, there is no distinction between `int` and `long`\n", "_____no_output_____" ] ], [ [ "42**42", "_____no_output_____" ], [ "(42**42).bit_length()", "_____no_output_____" ], [ "bin(42**42)", "_____no_output_____" ] ], [ [ "Division always leads to a float", "_____no_output_____" ] ], [ [ "2 / 2", "_____no_output_____" ], [ "2 / 2.0", "_____no_output_____" ] ], [ [ "Note: This is an important difference between Python 2 and Python 3. Old-style division between `int`s can be done with a double slash `\\\\`", "_____no_output_____" ] ], [ [ "2 // 2", "_____no_output_____" ], [ "3 // 2", "_____no_output_____" ], [ "2.5 // 2 # egad, dont do this.", "_____no_output_____" ] ], [ [ "There is also `complex` types", "_____no_output_____" ] ], [ [ "complex(1,2)", "_____no_output_____" ], [ "1+2j", "_____no_output_____" ], [ "1 + 2j - 2j", "_____no_output_____" ] ], [ [ "Note: Access to [`decimal`](https://docs.python.org/3/library/decimal.html#module-decimal) (decimal fixed point and floating point arithmetic) and [`fraction`](https://docs.python.org/3/library/fractions.html#module-fractions) types/operations is through built-in `modules`.", "_____no_output_____" ], [ " &nbsp;", "_____no_output_____" ], [ "Let's do some math", "_____no_output_____" ] ], [ [ "(3.0*10.0 - 25.0)/5.0", "_____no_output_____" ], [ "print(3.085e18*1e6) # this is a Megaparsec in units of cm!", "3.085e+24\n" ], [ "t = 1.0 # declare a variable t (time)\naccel = 9.8 # acceleration in units of m/s^2", "_____no_output_____" ], [ "# distance travelled in time t seconds is 1/2 a*t**2\ndist = 0.5*accel*t*t\nprint(dist) # this is the distance in meters", "4.9\n" ], [ "dist1 = accel*(t**2)/2\nprint(dist1)", "4.9\n" ], [ "dist2 = 0.5*accel*pow(t,2)\nprint(dist2)", "4.9\n" ] ], [ [ " - **variables** are assigned on the fly\n - multiplication, division, exponents as you expect", "_____no_output_____" ] ], [ [ "print(6 / 5) ; print(9 / 5)", "1.2\n1.8\n" ], [ "print(6 // 5) ; print(9 // 5) # remember double-slash integer division returns the floor", "1\n1\n" ], [ "6 % 5 # mod operator", "_____no_output_____" ], [ "1 << 2 ## shift: move the number 1 by two bits to the left\n ## that is make a new number 100 (base 2)", "_____no_output_____" ], [ "5 >> 1 ## shift: move the number 5 = 101 (base 2) one to\n ## to the right (10 = 2)", "_____no_output_____" ], [ "x = 2 ; y = 3 ## assign two variables on the same line!\nx | y ## 
bitwise OR", "_____no_output_____" ], [ "x ^ y ## exclusive OR (10 ^ 11 = 01)", "_____no_output_____" ], [ "x & y ## bitwise AND", "_____no_output_____" ], [ "x = x ^ y ; print(x)", "1\n" ], [ "x += 3 ; print(x)", "4\n" ], [ "x /= 2.0 ; print(x)", "2.0\n" ] ], [ [ "we'll see a lot more mathy operators and functions later", "_____no_output_____" ], [ "## Relationships ##", "_____no_output_____" ] ], [ [ "# from before dist1 = 4.9 and dist = 4.9\ndist1 == dist", "_____no_output_____" ], [ "dist < 10", "_____no_output_____" ], [ "dist <= 4.9", "_____no_output_____" ], [ "dist < (10 + 2j)", "_____no_output_____" ], [ "dist < -2.0", "_____no_output_____" ], [ "dist != 3.1415", "_____no_output_____" ] ], [ [ " &nbsp;", "_____no_output_____" ], [ "** More on Variables & Types **", "_____no_output_____" ] ], [ [ "0 == False", "_____no_output_____" ], [ "not False", "_____no_output_____" ], [ "0.0 == False", "_____no_output_____" ], [ "not (10.0 - 10.0)", "_____no_output_____" ], [ "not -1", "_____no_output_____" ], [ "not 3.1415", "_____no_output_____" ], [ "x = None # None is something special. Not true or false\nNone == False", "_____no_output_____" ], [ "None == True", "_____no_output_____" ], [ "False or True", "_____no_output_____" ], [ "False and True", "_____no_output_____" ], [ "float(\"nan\") == True", "_____no_output_____" ] ], [ [ " &nbsp;", "_____no_output_____" ], [ "** More on Variables & Types **", "_____no_output_____" ] ], [ [ "print(type(1))", "<class 'int'>\n" ], [ "x = 2 ; type(x)", "_____no_output_____" ], [ "type(2) == type(1)", "_____no_output_____" ], [ "print(type(True))", "<class 'bool'>\n" ], [ "print(type(type(1)))", "<class 'type'>\n" ], [ "print(type(pow))", "<class 'builtin_function_or_method'>\n" ] ], [ [ " &nbsp;", "_____no_output_____" ], [ "we can test whether something is a certain type with **`isinstance()`**", "_____no_output_____" ] ], [ [ "isinstance(1,int)", "_____no_output_____" ], [ "isinstance(1,(int,float))", "_____no_output_____" ], [ "isinstance(\"spam\",str)", "_____no_output_____" ], [ "isinstance(1.212,int)", "_____no_output_____" ], [ "isinstance(1.212,int)", "_____no_output_____" ] ], [ [ "We'll see later than numbers are an instance of an object, which have methods that can act upon itself:", "_____no_output_____" ] ], [ [ "(1.212).is_integer()", "_____no_output_____" ], [ "(1.0).is_integer()", "_____no_output_____" ] ], [ [ "builtin-types: **`int`**, **`bool`**, **`str`**, **`float`**, **`complex`**", "_____no_output_____" ], [ "# Strings", "_____no_output_____" ], [ "Strings are a sequence of characters\n- they can be indexed and sliced up as if they were an array\n- you can glue strings together with + signs\n\nStrings are **immutable** (unlike in C), so you cannot change a string in place (this isn't so bad...)\n\nStrings can be formatted and compared ", "_____no_output_____" ] ], [ [ ">>> x = \"spam\" ; print(type(x))", "<class 'str'>\n" ], [ "print(\"hello!\\n...my sire.\")", "hello!\n...my sire.\n" ], [ "\"hello!\\n...my sire.\"", "_____no_output_____" ], [ "\"wah?!\" == 'wah?!'", "_____no_output_____" ], [ "print(\"'wah?!' said the student\")", "'wah?!' 
said the student\n" ], [ "print(\"\\\"wah?!\\\" said the student\")", "\"wah?!\" said the student\n" ] ], [ [ "backslashes (\\\\) start special (escape) characters:\n```\n \\n = newline (\\r = return)\n \\t = tab\n \\a = bell\n```\nstring literals are defined with double quotes or quotes.\nThe outermost quote type cannot be used inside the string (unless it's escaped with a backslash)\n\nSee: http://docs.python.org/reference/lexical_analysis.html#string-literals", "_____no_output_____" ] ], [ [ "print(\"\\a\\a\\a\")", "\u0007\u0007\u0007\n" ], [ "# raw strings don't escape characters\nprint(r'This is a raw string...newlines \\r\\n are ignored.')", "This is a raw string...newlines \\r\\n are ignored.\n" ], [ "# Triple quotes are real useful for multiple line strings\ny = '''For score and seven minutes ago,\n you folks all learned some basic mathy stuff with Python\n and boy were you blown away!'''\nprint(y)", "For score and seven minutes ago,\n you folks all learned some basic mathy stuff with Python\n and boy were you blown away!\n" ] ], [ [ "\n - prepending ``r`` makes that string \"raw\"\n\n - triple quotes allow you to compose long strings\n \n https://docs.python.org/3.4/reference/lexical_analysis.html#literals", "_____no_output_____" ] ], [ [ "print(\"\\N{RIGHT CURLY BRACKET}\")", "}\n" ], [ "print(\"\\N{BLACK HEART SUIT}\")", "♥\n" ] ], [ [ "http://www.fileformat.info/info/unicode/char/search.htm", "_____no_output_____" ] ], [ [ "s = \"spam\" ; e = \"eggs\"\nprint(s + e)", "spameggs\n" ], [ "print(\"spam\"\n \"eggs\"\n \"Trumpkins\")", "spameggsTrumpkins\n" ], [ "print(s\n \"eggs\")", "_____no_output_____" ], [ "print(s + \" and \" + e)", "spam and eggs\n" ], [ "print(s,\"and\",e, sep=\" \")", "spam and eggs\n" ], [ "print(\"green \" + e + \" and\\n \" + s + \"\\n\\t ... and Trumpkins\")", "green eggs and\n spam\n\t ... and Trumpkins\n" ], [ "print(s*3 + e)", "spamspamspameggs\n" ], [ "print(s*3,e,sep=\"->\")", "spamspamspam->eggs\n" ], [ "print(\"*\"*50)", "**************************************************\n" ], [ "print(\"spam\" == \"good\") ; print(\"spam\" == \"spam\")", "False\nTrue\n" ], [ "\"spam\" < \"zoo\"", "_____no_output_____" ], [ "\"s\" < \"spam\"", "_____no_output_____" ] ], [ [ " - you can concatenate strings with ``+`` sign\n - you can do multiple concatenations with the ``*`` sign\n - strings can be compared", "_____no_output_____" ] ], [ [ "print('I want' + 3 + ' eggs and no ' + s)", "_____no_output_____" ], [ "print('I want ' + str(3) + ' eggs and no ' + s) ", "I want 3 eggs and no spam\n" ], [ "pi = 3.14159\nprint('I want ' + str(pi) + ' eggs and no ' + s)", "I want 3.14159 eggs and no spam\n" ], [ "print(str(True) + \":\" + ' I want ' + str(pi) + ' eggs and no ' + s)", "True: I want 3.14159 eggs and no spam\n" ] ], [ [ "you must concatenate only strings, coercing (\"casting\") \nother variable types to `str`", "_____no_output_____" ], [ "there's a cleaner way to do this, with string formatting. 
we'll see that tomorrow.", "_____no_output_____" ], [ "### Getting input from the user: always a string response", "_____no_output_____" ] ], [ [ "faren = input(\"Enter the temperature (in Fahrenheit): \")", "Enter the temperature (in Fahrenheit): 3.4\n" ], [ "cent = (5.0/9.0)*(faren - 32.0)", "_____no_output_____" ], [ "faren = float(faren)\ncent = (5.0/9.0)*(faren - 32.0) ; print(cent)", "-15.888888888888891\n" ], [ "faren = float(input(\"Enter the temperature (in Fahrenheit): \"))\nprint((5.0/9.0)*(faren - 32.0))", "Enter the temperature (in Fahrenheit): 23\n-5.0\n" ] ], [ [ " &nbsp;", "_____no_output_____" ], [ "#### We can think of strings as arrays (although, unlike in C you never really need to deal with directly addressing character locations in memory)", "_____no_output_____" ] ], [ [ "s =\"spam\"\nlen(s)", "_____no_output_____" ], [ "len(\"eggs\\n\")", "_____no_output_____" ], [ "len(\"\")", "_____no_output_____" ], [ "s[0]", "_____no_output_____" ], [ "s[-1]", "_____no_output_____" ] ], [ [ " - ``len()`` gives us the length of an array\n - strings are zero indexed\n - can also count backwards", "_____no_output_____" ], [ "We can think of strings as arrays\n(although, unlike in C you never really need to deal with directly addressing character locations in memory)", "_____no_output_____" ], [ "<img src=\"https://raw.github.com/profjsb/python-bootcamp/master/Lectures/01_BasicTraining/spam.png\">", "_____no_output_____" ], [ "useful for slicing: indices are between the characters", "_____no_output_____" ], [ "<img src=\"https://raw.github.com/profjsb/python-bootcamp/master/Lectures/01_BasicTraining/spam.png\">", "_____no_output_____" ] ], [ [ "s[0:1] # get every character between 0 and 1", "_____no_output_____" ], [ "s[1:4] # get every character between 1 and 4 ", "_____no_output_____" ], [ "s[-2:-1] ", "_____no_output_____" ], [ "## slicing [m:n] will return abs(n-m) characters\ns[0:100] # if the index is beyond the len(str), you dont segfault!", "_____no_output_____" ], [ "s[1:] # python runs the index to the end", "_____no_output_____" ], [ "s[:2] # python runs the index to the beginning", "_____no_output_____" ], [ "s[::-1] # print it out backwards", "_____no_output_____" ] ], [ [ " s = s[:n] + s[n:] for all n", "_____no_output_____" ], [ "## Basic Control (Flow)", "_____no_output_____" ], [ "Python has pretty much all of what you use:\n\n if...elif...else, for, while\n\nAs well as:\n\n break, continue (within loops)\n \nDoes not have:\n\n case (explicitly), goto\n\nDoes have: `pass`", "_____no_output_____" ], [ "### Flow is done within blocks (where indentation matters)", "_____no_output_____" ] ], [ [ "x = 1\nif x > 0:\n print(\"yo\")\nelse:\n print(\"dude\")", "yo\n" ] ], [ [ "Note: if you are doing this within the Python interpreter you'll see the ...\n```\n>>> x = 1\n>>> if x > 0:\n... print \"yo\"\n... else:\n... print \"dude\"\n... \nyo\n```", "_____no_output_____" ], [ "Note colons & indentations (tabbed or spaced)", "_____no_output_____" ] ], [ [ "x = 1\nif x > 0:\n print(\"yo\")\nelse:\n print(\"dude\")", "yo\n" ] ], [ [ "Indentations with the same block must be the same but not within different blocks (though this is ugly)", "_____no_output_____" ], [ "one-liners", "_____no_output_____" ] ], [ [ "print(\"yo\" if x > 0 else \"dude\")", "yo\n" ] ], [ [ "a small program... 
Do Control-C to stop (in Python/IPython) or \"Kernel->Interrupt\" in IPython notebook", "_____no_output_____" ] ], [ [ "x = 1\ny = 0\nwhile True:\n print(\"yo\" if x > 0 else \"dude\")\n x *= -1\n y += 1\n if y > 42:\n break", "yo\ndude\nyo\ndude\nyo\ndude\nyo\ndude\nyo\ndude\nyo\ndude\nyo\ndude\nyo\ndude\nyo\ndude\nyo\ndude\nyo\ndude\nyo\ndude\nyo\ndude\nyo\ndude\nyo\ndude\nyo\ndude\nyo\ndude\nyo\ndude\nyo\ndude\nyo\ndude\nyo\ndude\nyo\n" ] ], [ [ "case statements can be constructed with \njust a bunch of if, elif,...else", "_____no_output_____" ] ], [ [ "if x < 1:\n print(\"t\")\nelif x > 100:\n print(\"yo\")\nelse:\n print(\"dude\")", "t\n" ] ], [ [ "ordering matters. The first block of `True` in an if/elif gets executed then everything else does not.", "_____no_output_____" ], [ "blocks cannot be empty", "_____no_output_____" ] ], [ [ "x = \"fried goldfish\"\nif x == \"spam for dinner\":\n print(\"I will destroy the universe\")\nelse:\n # I'm fine with that. I'll do nothing", "_____no_output_____" ] ], [ [ "`pass` is a \"do nothing\" statement", "_____no_output_____" ] ], [ [ "if x == \"spam for dinner\":\n print(\"I will destroy the universe\")\nelse:\n # I'm fine with that. I'll do nothing\n pass", "_____no_output_____" ] ], [ [ "The double percent sign at the top of an IPython/Jupyter cell is a cell-level \"magic\". It's not Python itself, but defined as part of IPython/Jupyter. We'll see more on this later in the bootcamp.", "_____no_output_____" ] ], [ [ "%%file temp1.py\n# set some initial variables. Set the initial temperature low \nfaren = -1000\n\n# we dont want this going on forever, let's make sure we cannot have too many attempts \nmax_attempts = 6\nattempt = 0\n\nwhile faren < 100:\n # let's get the user to tell us what temperature it is \n newfaren = float(input(\"Enter the temperature (in Fahrenheit): \"))\n if newfaren > faren:\n print(\"It's getting hotter\")\n elif newfaren < faren:\n print(\"It's getting cooler\")\n else:\n # nothing has changed, just continue in the loop \n continue\n faren = newfaren # now set the current temp to the new temp just entered \n attempt += 1 # bump up the attempt number \n if attempt >= max_attempts:\n # we have to bail out \n break\nif attempt >= max_attempts:\n # we bailed out because of too many attempts \n print(\"Too many attempts at raising the temperature.\")\nelse:\n # we got here because it's hot \n print(\"it's hot here, people.\")", "Overwriting temp1.py\n" ], [ "%run temp1", "Enter the temperature (in Fahrenheit): 12\nIt's getting hotter\nEnter the temperature (in Fahrenheit): 14\nIt's getting hotter\nEnter the temperature (in Fahrenheit): 12\nIt's getting cooler\nEnter the temperature (in Fahrenheit): -1\nIt's getting cooler\nEnter the temperature (in Fahrenheit): 101\nIt's getting hotter\nit's hot here, man.\n" ], [ "%run temp1", "Enter the temperature (in Fahrenheit): 100\nIt's getting hotter\nit's hot here, people.\n" ], [ "%%file temp2.py\n\n# set some initial variables. 
Set the initial temperature low \nfaren = -1000\n\n# we dont want this going on forever, let's make sure we cannot have too many attempts \nmax_attempts = 6\nattempt = 0\n\nwhile faren < 100 and (attempt < max_attempts):\n # let's get the user to tell us what temperature it is \n newfaren = float(input(\"Enter the temperature (in Fahrenheit): \"))\n if newfaren > faren:\n print(\"It's getting hotter\")\n elif newfaren < faren:\n print(\"It's getting cooler\")\n else:\n # nothing has changed, just continue in the loop \n continue\n faren = newfaren # now set the current temp to the new temp just entered \n attempt += 1 # bump up the attempt number \n\nif attempt >= max_attempts:\n # we bailed out because of too many attempts \n print(\"Too many attempts at raising the temperature.\")\nelse:\n # we got here because it's hot \n print(\"it's hot here, people.\")", "Overwriting temp2.py\n" ] ], [ [ "UC Berkeley Python Bootcamp - Basic Training\n(c) J. Bloom 2008-2016 All Rights Reserved", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
c531091ad34cf446bca636a8f903601d8ea42dbe
66,425
ipynb
Jupyter Notebook
Support notebooks/Create time series.ipynb
MoshaLangerak/Data_Challenge_2_Group_18
34325993376edb967723611d0a86ccef748932d1
[ "MIT" ]
null
null
null
Support notebooks/Create time series.ipynb
MoshaLangerak/Data_Challenge_2_Group_18
34325993376edb967723611d0a86ccef748932d1
[ "MIT" ]
null
null
null
Support notebooks/Create time series.ipynb
MoshaLangerak/Data_Challenge_2_Group_18
34325993376edb967723611d0a86ccef748932d1
[ "MIT" ]
null
null
null
32.481663
271
0.389432
[ [ [ "import pandas as pd\nimport numpy as np", "_____no_output_____" ], [ "def load_forces(forces):\n df_streets = dict()\n \n for force in forces:\n file_path_streets = './Data/force_data/' + force + '_street.csv'\n df_streets[force] = pd.read_csv(file_path_streets, low_memory=False, index_col=0)\n \n return df_streets", "_____no_output_____" ] ], [ [ "The forces around London are: \\\nMetropolitan Police Service \\\nCity of London Police \\\nKent Police \\\nSussex Police \\\nSurrey Police \\\nEssex Police \\\nHertfordshire Police \\\nThames Valley Police \\\nBedfordshire Police \\\nHampshire Police", "_____no_output_____" ] ], [ [ "forces = ['metropolitan', 'city-of-london', 'kent', 'sussex', 'surrey', \n 'essex', 'hertfordshire', 'thames-valley', 'bedfordshire', 'hampshire']\ndf_streets = load_forces(forces)", "_____no_output_____" ], [ "df_streets_all = pd.DataFrame()\n\nfor key in forces:\n df_streets_all = pd.concat([df_streets_all, df_streets[key]], ignore_index=True)", "_____no_output_____" ], [ "df_streets_all.dtypes", "_____no_output_____" ], [ "file_path_employment = './Data/2019_employment.csv'\ndf_employment = pd.read_csv(file_path_employment, low_memory=False, sep=';')", "_____no_output_____" ], [ "df_employment.dtypes", "_____no_output_____" ], [ "df_employment[df_employment['LSOA Code (2011)'] == 'E01000027']", "_____no_output_____" ], [ "df_employment.columns", "_____no_output_____" ], [ "df_employment[['LSOA Code (2011)', 'Local Authority District code (2019)', 'Local Authority District name (2019)', 'Employment Domain Score']]\n ", "_____no_output_____" ], [ "df_streets_all = df_streets_all.merge(df_employment[['LSOA Code (2011)', 'Local Authority District code (2019)', 'Local Authority District name (2019)', 'Employment Domain Score']]\n , how = 'left', left_on = 'LSOA code', right_on = 'LSOA Code (2011)')", "_____no_output_____" ], [ "df_streets_all = df_streets_all.drop(['LSOA Code (2011)'], axis=1)\n", "_____no_output_____" ], [ "file_path_income = './Data/2019_income.csv'\ndf_income = pd.read_csv(file_path_income, low_memory=False, sep=';')", "_____no_output_____" ], [ "df_income.dtypes", "_____no_output_____" ], [ "df_income[df_income['LSOA Code (2011)'] == 'E01000027']", "_____no_output_____" ], [ "df_income.columns", "_____no_output_____" ], [ "df_income[['LSOA Code (2011)', 'Income Domain Score', 'IDACI Score', 'IDAOPI Score']]", "_____no_output_____" ], [ "df_streets_all = df_streets_all.merge(df_income[['LSOA Code (2011)', 'Income Domain Score', 'IDACI Score', 'IDAOPI Score']], how = 'left', left_on = 'LSOA code', right_on = 'LSOA Code (2011)')", "_____no_output_____" ], [ "df_streets_all = df_streets_all.drop(['LSOA Code (2011)'], axis=1)", "_____no_output_____" ], [ "df_streets_all.head()", "_____no_output_____" ], [ "file_path_police_strength = './Data/police_strength.csv'\ndf_police_strength = pd.read_csv(file_path_police_strength, low_memory=False, sep=';')", "_____no_output_____" ], [ "df_police_strength.head()", "_____no_output_____" ], [ "df_police_strength.dtypes", "_____no_output_____" ], [ "df_police_strength.columns", "_____no_output_____" ], [ "df_police_strength[['force_name', '2019']]", "_____no_output_____" ], [ "df_streets_all['Reported by'].unique()", "_____no_output_____" ], [ "force_conv = {'Metropolitan Police':'Metropolitan Police Service', \n 'London, City of':'City of London Police',\n 'Kent':'Kent Police', \n 'Hampshire':'Hampshire Constabulary',\n 'Avon & Somerset':'Avon and Somerset Constabulary', \n 'Sussex':'Sussex Police', \n 
'Surrey':'Surrey Police',\n 'Essex':'Essex Police',\n 'Hertfordshire':'Hertfordshire Constabulary',\n 'Thames Valley':'Thames Valley Police',\n 'Bedfordshire':'Bedfordshire Police'}", "_____no_output_____" ], [ "df_police_strength['force_name'] = df_police_strength['force_name'].map(force_conv, na_action='ignore')", "_____no_output_____" ], [ "df_police_strength[df_police_strength['force_name'] == 'Bedfordshire Police']['2003']", "_____no_output_____" ], [ "df_streets_all.head()", "_____no_output_____" ], [ "df_streets_all[]", "_____no_output_____" ], [ "for col in df_police_strength.columns[2:]:\n df_streets_all = df_streets_all.merge(df_police_strength[['force_name', col]], how = 'left', left_on = 'Reported by', right_on = 'force_name')\n df_streets_all = df_streets_all.drop(['force_name'], axis=1)\n\n ", "_____no_output_____" ], [ "df_streets_all = df_streets_all.merge(df_police_strength[['force_name', '2019']], how = 'left', left_on = 'Reported by', right_on = 'force_name')\n", "_____no_output_____" ], [ "df_streets_all = df_streets_all.drop(['force_name'], axis=1)\n", "_____no_output_____" ], [ "file_path_police_funding = './Data/police_funding.csv'\ndf_police_funding = pd.read_csv(file_path_police_funding, low_memory=False, sep=';')", "_____no_output_____" ], [ "df_police_funding", "_____no_output_____" ], [ "df_police_funding['Police force'] = df_police_funding['Police force'].map(force_conv, na_action='ignore')", "_____no_output_____" ], [ "df_streets_all = df_streets_all.merge(df_police_funding[['Police force', '2018-19']], how = 'left', left_on = 'Reported by', right_on = 'Police force')\n", "_____no_output_____" ], [ "df_streets_all.head(5)", "_____no_output_____" ], [ "df_streets_all = df_streets_all.drop(['Police force'], axis=1)\n", "_____no_output_____" ], [ "file_path_population = './Data/2018_population_data.csv'\ndf_population = pd.read_csv(file_path_population, low_memory=False, sep=';')", "_____no_output_____" ], [ "df_population", "_____no_output_____" ], [ "df_streets_all = df_streets_all.merge(df_population[['CODE', 'POPULATION (2018)']], how = 'left', left_on = 'Local Authority District code (2019)', right_on = 'CODE')\n", "_____no_output_____" ], [ "df_streets_all.head()", "_____no_output_____" ], [ "df_streets_all = df_streets_all.drop(['CODE'], axis=1)\n", "_____no_output_____" ], [ "df_streets_all.rename(columns = {'2019':'Police Strength', '2018-19':'Police Funding', 'POPULATION (2018)':'Population'}, inplace = True)\n", "_____no_output_____" ], [ "df_2019 = df_streets_all[df_streets_all['Month'].str.contains('2019')]", "_____no_output_____" ], [ "df_2019.to_csv('./Data/2019_data.csv', index=False)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c5313c8aeb3bff302a7543643088ae2b67dbc21b
40,272
ipynb
Jupyter Notebook
summer-of-code/week-04/day3_class.ipynb
debracupitt/toolkitten
94179a448b58f04dae4eb3e54c9a0ec740a4950d
[ "MIT" ]
719
2018-06-17T17:40:16.000Z
2022-03-28T00:21:48.000Z
summer-of-code/week-04/day3_class.ipynb
debracupitt/toolkitten
94179a448b58f04dae4eb3e54c9a0ec740a4950d
[ "MIT" ]
92
2018-06-26T13:06:21.000Z
2020-03-17T19:25:35.000Z
summer-of-code/week-04/day3_class.ipynb
debracupitt/toolkitten
94179a448b58f04dae4eb3e54c9a0ec740a4950d
[ "MIT" ]
784
2018-06-18T08:05:30.000Z
2022-02-20T13:31:25.000Z
56.167364
4,366
0.618544
[ [ [ "# 1millionwomentotech SummerOfCode\n\n## Intro to AI: Week 4 Day 3", "_____no_output_____" ] ], [ [ "print(baby_train[50000]['reviewText'])", "I did hours of research on strollers... contemplating between the First Years Indigo and theBumbleride Flyer Reversible Handle Stroller with 7\" Wheels, Seagrass. I finally settled on the Indigo stroller because the price was right and the features I wanted were all there. I purchased the stroller and matchingThe First Years Via Infant Car Seat, Abstract O'scar seat. My son was born in the winter and I didn't use the Indigo stroller regularly until about two months ago. For the first week, the stroller was wonderful. What I had initially liked most about the stroller was that I could easily attach the matching First Years Via car seat. In addition, the ride was smooth, quiet, and comfy. The stroller was a breeze to fold. Then, about two weeks into regular use, I started to notice the quality of the stroller diminish quickly. Here is what started to happen:(1) SQUEAKY WHEELS. The wheels started squeaking terribly. I'd oil them and within a few days the stroller was squeaking again! I'd be in a store and people would turn there heads to see who had the annoyingly loud stroller.(2) VIA CAR SEAT WOULD GET CAUGHT. This is what really bothered me. I could live with the squeaking, but this I could not. My son is 6 months old and we still use the infant car seat regularly. One day the car seat began to get stuck inside the left car seat stroller locking mechanism. We thought maybe it was our son's weight causing the problem, so we took him out of the car seat and tried again. No luck! We still couldn't get the car seat out. After fidgeting with the stroller and car seat for nearly 30minutes, we finally got the car seat unstuck! To test things out to see whether or not it was the stroller or car seat causing the problem, we decided to attached seat that came with the stroller. That too would get stuck and we wrestled around with it until it miraculously came unstuck.Unfortunately, this stroller just had to be returned (thank goodness for Amazon's 365 day return policy on baby items). As much as I liked its other features, I couldn't put up with its defective qualities. Perhaps I received a defective stroller AND car seat but I find that really hard to believe.In addition to it being defective, here is what else I didn't like about the stroller.(1) BULKY. I knew when I purchased this stroller that it would be a standard size stroller but I still feel it was unnecessarily bulky. In order to fold it up, you have to remove the seat and then fold up the frame. Before I had my son I didn't think this was a big deal. Now I know better. I'd recommend you find a one piece fold-up stroller like the Bumbleride Flyer.(2) NO CUPHOLDERS. There are no cup holders for the child or adult using the stroller. At the time I purchased the stroller, there were also no accessories available that could attach to the stroller. I think this is the huge oversight.(3) SEAT HAD TO BE REMOVED. I really found it annoying that I had to remove the seat in order to collapse the stroller or switch the stroller from back to forward facing. As I said earlier, I initially thought this was a frivolous disadvantage but I now think otherwise. Other strollers that have the same back to forward facing option allow you to simply switch the direction of the handles without having to remove the seat. 
If First Years ever redesigns this stroller, they should include that option in their redesign.Even though I decided to return the stroller, there are still some qualities that I liked about this stroller. Here is what I liked:(1) BACK AND FORWARD FACING. I loved that I could face my son towards me when he was an infant and away from me when he gets old enough that he wanted to explore.(2) EASY TO COLLAPSE. Although the stroller was bulky, the stroller was still a breeze to collapse.(3) EUROPEAN STYLING. In addition the practicality, I was looking for a European styled stroller. This is the most affordable stroller that is also stylish.After I returned the First Years Indigo Stroller, I went ahead and purchased theBumbleride Flyer Reversible Handle Stroller with 7\" Wheels, Seagrass. I will update this review in the near future with a comparison of the two after I get more use of theBumbleride Flyer Reversible Handle Stroller with 7\" Wheels, Seagrass. If you can afford to splurge on the Bumbleride Flyer, I would highly recommend it.\n" ], [ "from nltk.sentiment.vader import SentimentIntensityAnalyzer\nsia = SentimentIntensityAnalyzer()\ntext = baby_train[50000]['reviewText']\nfor s in sent_tokenize(text):\n print(s)\n print(sia.polarity_scores(s))", "I did hours of research on strollers... contemplating between the First Years Indigo and theBumbleride Flyer Reversible Handle Stroller with 7\" Wheels, Seagrass.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nI finally settled on the Indigo stroller because the price was right and the features I wanted were all there.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nI purchased the stroller and matchingThe First Years Via Infant Car Seat, Abstract O'scar seat.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nMy son was born in the winter and I didn't use the Indigo stroller regularly until about two months ago.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nFor the first week, the stroller was wonderful.\n{'neg': 0.0, 'neu': 0.654, 'pos': 0.346, 'compound': 0.5719}\nWhat I had initially liked most about the stroller was that I could easily attach the matching First Years Via car seat.\n{'neg': 0.0, 'neu': 0.776, 'pos': 0.224, 'compound': 0.6369}\nIn addition, the ride was smooth, quiet, and comfy.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nThe stroller was a breeze to fold.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nThen, about two weeks into regular use, I started to notice the quality of the stroller diminish quickly.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nHere is what started to happen:(1) SQUEAKY WHEELS.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nThe wheels started squeaking terribly.\n{'neg': 0.474, 'neu': 0.526, 'pos': 0.0, 'compound': -0.5574}\nI'd oil them and within a few days the stroller was squeaking again!\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nI'd be in a store and people would turn there heads to see who had the annoyingly loud stroller.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\n(2) VIA CAR SEAT WOULD GET CAUGHT.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nThis is what really bothered me.\n{'neg': 0.341, 'neu': 0.659, 'pos': 0.0, 'compound': -0.3804}\nI could live with the squeaking, but this I could not.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nMy son is 6 months old and we still use the infant car seat regularly.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nOne day the 
car seat began to get stuck inside the left car seat stroller locking mechanism.\n{'neg': 0.111, 'neu': 0.889, 'pos': 0.0, 'compound': -0.25}\nWe thought maybe it was our son's weight causing the problem, so we took him out of the car seat and tried again.\n{'neg': 0.109, 'neu': 0.891, 'pos': 0.0, 'compound': -0.4019}\nNo luck!\n{'neg': 0.401, 'neu': 0.0, 'pos': 0.599, 'compound': 0.2714}\nWe still couldn't get the car seat out.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nAfter fidgeting with the stroller and car seat for nearly 30minutes, we finally got the car seat unstuck!\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nTo test things out to see whether or not it was the stroller or car seat causing the problem, we decided to attached seat that came with the stroller.\n{'neg': 0.088, 'neu': 0.912, 'pos': 0.0, 'compound': -0.4019}\nThat too would get stuck and we wrestled around with it until it miraculously came unstuck.Unfortunately, this stroller just had to be returned (thank goodness for Amazon's 365 day return policy on baby items).\n{'neg': 0.054, 'neu': 0.865, 'pos': 0.081, 'compound': 0.25}\nAs much as I liked its other features, I couldn't put up with its defective qualities.\n{'neg': 0.164, 'neu': 0.678, 'pos': 0.158, 'compound': -0.0258}\nPerhaps I received a defective stroller AND car seat but I find that really hard to believe.In addition to it being defective, here is what else I didn't like about the stroller.\n{'neg': 0.304, 'neu': 0.696, 'pos': 0.0, 'compound': -0.8592}\n(1) BULKY.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nI knew when I purchased this stroller that it would be a standard size stroller but I still feel it was unnecessarily bulky.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nIn order to fold it up, you have to remove the seat and then fold up the frame.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nBefore I had my son I didn't think this was a big deal.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nNow I know better.\n{'neg': 0.0, 'neu': 0.408, 'pos': 0.592, 'compound': 0.4404}\nI'd recommend you find a one piece fold-up stroller like the Bumbleride Flyer.\n{'neg': 0.0, 'neu': 0.667, 'pos': 0.333, 'compound': 0.6124}\n(2) NO CUPHOLDERS.\n{'neg': 0.595, 'neu': 0.405, 'pos': 0.0, 'compound': -0.4466}\nThere are no cup holders for the child or adult using the stroller.\n{'neg': 0.155, 'neu': 0.845, 'pos': 0.0, 'compound': -0.296}\nAt the time I purchased the stroller, there were also no accessories available that could attach to the stroller.\n{'neg': 0.115, 'neu': 0.885, 'pos': 0.0, 'compound': -0.296}\nI think this is the huge oversight.\n{'neg': 0.0, 'neu': 0.685, 'pos': 0.315, 'compound': 0.3182}\n(3) SEAT HAD TO BE REMOVED.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nI really found it annoying that I had to remove the seat in order to collapse the stroller or switch the stroller from back to forward facing.\n{'neg': 0.211, 'neu': 0.789, 'pos': 0.0, 'compound': -0.7322}\nAs I said earlier, I initially thought this was a frivolous disadvantage but I now think otherwise.\n{'neg': 0.137, 'neu': 0.863, 'pos': 0.0, 'compound': -0.2263}\nOther strollers that have the same back to forward facing option allow you to simply switch the direction of the handles without having to remove the seat.\n{'neg': 0.0, 'neu': 0.932, 'pos': 0.068, 'compound': 0.2263}\nIf First Years ever redesigns this stroller, they should include that option in their redesign.Even though I decided to return the 
stroller, there are still some qualities that I liked about this stroller.\n{'neg': 0.0, 'neu': 0.915, 'pos': 0.085, 'compound': 0.4215}\nHere is what I liked:(1) BACK AND FORWARD FACING.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nI loved that I could face my son towards me when he was an infant and away from me when he gets old enough that he wanted to explore.\n{'neg': 0.0, 'neu': 0.87, 'pos': 0.13, 'compound': 0.5994}\n(2) EASY TO COLLAPSE.\n{'neg': 0.411, 'neu': 0.209, 'pos': 0.38, 'compound': -0.0772}\nAlthough the stroller was bulky, the stroller was still a breeze to collapse.\n{'neg': 0.225, 'neu': 0.775, 'pos': 0.0, 'compound': -0.4939}\n(3) EUROPEAN STYLING.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nIn addition the practicality, I was looking for a European styled stroller.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nThis is the most affordable stroller that is also stylish.After I returned the First Years Indigo Stroller, I went ahead and purchased theBumbleride Flyer Reversible Handle Stroller with 7\" Wheels, Seagrass.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nI will update this review in the near future with a comparison of the two after I get more use of theBumbleride Flyer Reversible Handle Stroller with 7\" Wheels, Seagrass.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nIf you can afford to splurge on the Bumbleride Flyer, I would highly recommend it.\n{'neg': 0.0, 'neu': 0.823, 'pos': 0.177, 'compound': 0.4201}\n" ], [ "def sia_features(dataset):\n \"\"\"For each review text in the dataset, extract:\n (1) the mean positive sentiment over all sentences\n (2) the mean neutral sentiment over all sentences\n (3) the mean negative sentiment over all sentences\n (4) the maximum positive sentiment over all sentences\n (5) the maximum neutral sentiment over all sentences\n (6) the maximum negative sentiment over all sentences\"\"\"\n feat_matrix = numpy.empty((len(dataset), 6))\n for i in range(len(dataset)):\n sentences = sent_tokenize(dataset[i]['reviewText'])\n nsent = len(sentences)\n if nsent:\n sentence_polarities = numpy.empty((nsent, 3))\n for j in range(nsent):\n polarity = sia.polarity_scores(sentences[j])\n sentence_polarities[j, 0] = polarity['pos']\n sentence_polarities[j, 1] = polarity['neu']\n sentence_polarities[j, 2] = polarity['neg']\n feat_matrix[i, 0:3] = numpy.mean(sentence_polarities, axis=0) # mean over the columns\n feat_matrix[i, 3:6] = numpy.max(sentence_polarities, axis=0) # maximum over the columns\n else:\n feat_matrix[i, 0:6] = 0.0\n return feat_matrix\n\nsia_tr = sia_features(baby_train)", "_____no_output_____" ], [ "testmat = numpy.arange(12.).reshape((3, 4))\nprint(testmat)\nprint(numpy.max(testmat, axis=0))\nprint(numpy.mean(testmat, axis=1))", "[[ 0. 1. 2. 3.]\n [ 4. 5. 6. 7.]\n [ 8. 9. 10. 11.]]\n[ 8. 9. 10. 11.]\n[1.5 5.5 9.5]\n" ], [ "def len_features(dataset):\n \"\"\"Add two features:\n (1) length of review (in thousands of characters) - truncate at 2,500\n (2) percentage of exclamation marks (in %)\"\"\"\n feat_matrix = numpy.empty((len(dataset), 2))\n for i in range(len(dataset)):\n text = dataset[i]['reviewText']\n feat_matrix[i, 0] = len(text) / 1000.\n if text:\n feat_matrix[i, 1] = 100. 
* text.count('!') / len(text)\n else:\n feat_matrix[i, 1] = 0.0\n feat_matrix[feat_matrix>2.5] = 2.5\n return feat_matrix\n\nlen_tr = len_features(baby_train)", "_____no_output_____" ], [ "print(X_train_neg.shape, sia_tr.shape, len_tr.shape)", "(96512, 2) (96512, 6) (96512, 2)\n" ], [ "X_train_augmented = numpy.concatenate((X_train_neg, sia_tr, len_tr), axis=1) # stack horizontally\nlreg_augmented = LinearRegression().fit(X_train_augmented, Y_train)\npred_train_augmented = lreg_augmented.predict(X_train_augmented)\nmae_train_augmented = mean_absolute_error(pred_train_augmented, Y_train)\nprint(\"Now the mean absolute error on the training data is %f stars\" % mae_train_augmented)", "Now the mean absolute error on the training data is 0.758256 stars\n" ], [ "rf_augmented = RandomForestRegressor().fit(X_train_augmented, Y_train)\nrfpred_train_augmented = rf_augmented.predict(X_train_augmented)\nmae_train_rf_augmented = mean_absolute_error(rfpred_train_augmented, Y_train)\nprint(\"For the RF, it is %f stars\" % mae_train_rf_augmented)", "For the RF, it is 0.283528 stars\n" ], [ "X_valid_neg = dataset_to_matrix_with_neg(baby_valid)\nsia_valid = sia_features(baby_valid)\nlen_valid = len_features(baby_valid)\nX_valid_augmented = numpy.concatenate((X_valid_neg, sia_valid, len_valid), axis=1)\npred_valid_augmented = lreg_augmented.predict(X_valid_augmented)\npred_valid_rf_augmented = rf_augmented.predict(X_valid_augmented)\nmae_valid_augmented = mean_absolute_error(pred_valid_augmented, Y_valid)\nprint(\"On the validation set, we get %f error for the linear regression\" % mae_valid_augmented)\nmae_valid_rf_augmented = mean_absolute_error(pred_valid_rf_augmented, Y_valid)\nprint(\"And %f for the random forest regression\" % mae_valid_rf_augmented)", "On the validation set, we get 0.755795 error for the linear regression\nAnd 0.731631 for the random forest regression\n" ], [ "print(baby_train[50000]['reviewText'])", "I did hours of research on strollers... contemplating between the First Years Indigo and theBumbleride Flyer Reversible Handle Stroller with 7\" Wheels, Seagrass. I finally settled on the Indigo stroller because the price was right and the features I wanted were all there. I purchased the stroller and matchingThe First Years Via Infant Car Seat, Abstract O'scar seat. My son was born in the winter and I didn't use the Indigo stroller regularly until about two months ago. For the first week, the stroller was wonderful. What I had initially liked most about the stroller was that I could easily attach the matching First Years Via car seat. In addition, the ride was smooth, quiet, and comfy. The stroller was a breeze to fold. Then, about two weeks into regular use, I started to notice the quality of the stroller diminish quickly. Here is what started to happen:(1) SQUEAKY WHEELS. The wheels started squeaking terribly. I'd oil them and within a few days the stroller was squeaking again! I'd be in a store and people would turn there heads to see who had the annoyingly loud stroller.(2) VIA CAR SEAT WOULD GET CAUGHT. This is what really bothered me. I could live with the squeaking, but this I could not. My son is 6 months old and we still use the infant car seat regularly. One day the car seat began to get stuck inside the left car seat stroller locking mechanism. We thought maybe it was our son's weight causing the problem, so we took him out of the car seat and tried again. No luck! We still couldn't get the car seat out. 
After fidgeting with the stroller and car seat for nearly 30minutes, we finally got the car seat unstuck! To test things out to see whether or not it was the stroller or car seat causing the problem, we decided to attached seat that came with the stroller. That too would get stuck and we wrestled around with it until it miraculously came unstuck.Unfortunately, this stroller just had to be returned (thank goodness for Amazon's 365 day return policy on baby items). As much as I liked its other features, I couldn't put up with its defective qualities. Perhaps I received a defective stroller AND car seat but I find that really hard to believe.In addition to it being defective, here is what else I didn't like about the stroller.(1) BULKY. I knew when I purchased this stroller that it would be a standard size stroller but I still feel it was unnecessarily bulky. In order to fold it up, you have to remove the seat and then fold up the frame. Before I had my son I didn't think this was a big deal. Now I know better. I'd recommend you find a one piece fold-up stroller like the Bumbleride Flyer.(2) NO CUPHOLDERS. There are no cup holders for the child or adult using the stroller. At the time I purchased the stroller, there were also no accessories available that could attach to the stroller. I think this is the huge oversight.(3) SEAT HAD TO BE REMOVED. I really found it annoying that I had to remove the seat in order to collapse the stroller or switch the stroller from back to forward facing. As I said earlier, I initially thought this was a frivolous disadvantage but I now think otherwise. Other strollers that have the same back to forward facing option allow you to simply switch the direction of the handles without having to remove the seat. If First Years ever redesigns this stroller, they should include that option in their redesign.Even though I decided to return the stroller, there are still some qualities that I liked about this stroller. Here is what I liked:(1) BACK AND FORWARD FACING. I loved that I could face my son towards me when he was an infant and away from me when he gets old enough that he wanted to explore.(2) EASY TO COLLAPSE. Although the stroller was bulky, the stroller was still a breeze to collapse.(3) EUROPEAN STYLING. In addition the practicality, I was looking for a European styled stroller. This is the most affordable stroller that is also stylish.After I returned the First Years Indigo Stroller, I went ahead and purchased theBumbleride Flyer Reversible Handle Stroller with 7\" Wheels, Seagrass. I will update this review in the near future with a comparison of the two after I get more use of theBumbleride Flyer Reversible Handle Stroller with 7\" Wheels, Seagrass. If you can afford to splurge on the Bumbleride Flyer, I would highly recommend it.\n" ], [ "from nltk.sentiment.vader import SentimentIntensityAnalyzer\nsia = SentimentIntensityAnalyzer()\ntext = baby_train[50000]['reviewText']\nfor s in sent_tokenize(text):\n print(s)\n print(sia.polarity_scores(s))", "I did hours of research on strollers... 
contemplating between the First Years Indigo and theBumbleride Flyer Reversible Handle Stroller with 7\" Wheels, Seagrass.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nI finally settled on the Indigo stroller because the price was right and the features I wanted were all there.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nI purchased the stroller and matchingThe First Years Via Infant Car Seat, Abstract O'scar seat.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nMy son was born in the winter and I didn't use the Indigo stroller regularly until about two months ago.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nFor the first week, the stroller was wonderful.\n{'neg': 0.0, 'neu': 0.654, 'pos': 0.346, 'compound': 0.5719}\nWhat I had initially liked most about the stroller was that I could easily attach the matching First Years Via car seat.\n{'neg': 0.0, 'neu': 0.776, 'pos': 0.224, 'compound': 0.6369}\nIn addition, the ride was smooth, quiet, and comfy.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nThe stroller was a breeze to fold.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nThen, about two weeks into regular use, I started to notice the quality of the stroller diminish quickly.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nHere is what started to happen:(1) SQUEAKY WHEELS.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nThe wheels started squeaking terribly.\n{'neg': 0.474, 'neu': 0.526, 'pos': 0.0, 'compound': -0.5574}\nI'd oil them and within a few days the stroller was squeaking again!\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nI'd be in a store and people would turn there heads to see who had the annoyingly loud stroller.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\n(2) VIA CAR SEAT WOULD GET CAUGHT.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nThis is what really bothered me.\n{'neg': 0.341, 'neu': 0.659, 'pos': 0.0, 'compound': -0.3804}\nI could live with the squeaking, but this I could not.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nMy son is 6 months old and we still use the infant car seat regularly.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nOne day the car seat began to get stuck inside the left car seat stroller locking mechanism.\n{'neg': 0.111, 'neu': 0.889, 'pos': 0.0, 'compound': -0.25}\nWe thought maybe it was our son's weight causing the problem, so we took him out of the car seat and tried again.\n{'neg': 0.109, 'neu': 0.891, 'pos': 0.0, 'compound': -0.4019}\nNo luck!\n{'neg': 0.401, 'neu': 0.0, 'pos': 0.599, 'compound': 0.2714}\nWe still couldn't get the car seat out.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nAfter fidgeting with the stroller and car seat for nearly 30minutes, we finally got the car seat unstuck!\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nTo test things out to see whether or not it was the stroller or car seat causing the problem, we decided to attached seat that came with the stroller.\n{'neg': 0.088, 'neu': 0.912, 'pos': 0.0, 'compound': -0.4019}\nThat too would get stuck and we wrestled around with it until it miraculously came unstuck.Unfortunately, this stroller just had to be returned (thank goodness for Amazon's 365 day return policy on baby items).\n{'neg': 0.054, 'neu': 0.865, 'pos': 0.081, 'compound': 0.25}\nAs much as I liked its other features, I couldn't put up with its defective qualities.\n{'neg': 0.164, 'neu': 0.678, 'pos': 0.158, 'compound': -0.0258}\nPerhaps I received a 
defective stroller AND car seat but I find that really hard to believe.In addition to it being defective, here is what else I didn't like about the stroller.\n{'neg': 0.304, 'neu': 0.696, 'pos': 0.0, 'compound': -0.8592}\n(1) BULKY.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nI knew when I purchased this stroller that it would be a standard size stroller but I still feel it was unnecessarily bulky.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nIn order to fold it up, you have to remove the seat and then fold up the frame.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nBefore I had my son I didn't think this was a big deal.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nNow I know better.\n{'neg': 0.0, 'neu': 0.408, 'pos': 0.592, 'compound': 0.4404}\nI'd recommend you find a one piece fold-up stroller like the Bumbleride Flyer.\n{'neg': 0.0, 'neu': 0.667, 'pos': 0.333, 'compound': 0.6124}\n(2) NO CUPHOLDERS.\n{'neg': 0.595, 'neu': 0.405, 'pos': 0.0, 'compound': -0.4466}\nThere are no cup holders for the child or adult using the stroller.\n{'neg': 0.155, 'neu': 0.845, 'pos': 0.0, 'compound': -0.296}\nAt the time I purchased the stroller, there were also no accessories available that could attach to the stroller.\n{'neg': 0.115, 'neu': 0.885, 'pos': 0.0, 'compound': -0.296}\nI think this is the huge oversight.\n{'neg': 0.0, 'neu': 0.685, 'pos': 0.315, 'compound': 0.3182}\n(3) SEAT HAD TO BE REMOVED.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nI really found it annoying that I had to remove the seat in order to collapse the stroller or switch the stroller from back to forward facing.\n{'neg': 0.211, 'neu': 0.789, 'pos': 0.0, 'compound': -0.7322}\nAs I said earlier, I initially thought this was a frivolous disadvantage but I now think otherwise.\n{'neg': 0.137, 'neu': 0.863, 'pos': 0.0, 'compound': -0.2263}\nOther strollers that have the same back to forward facing option allow you to simply switch the direction of the handles without having to remove the seat.\n{'neg': 0.0, 'neu': 0.932, 'pos': 0.068, 'compound': 0.2263}\nIf First Years ever redesigns this stroller, they should include that option in their redesign.Even though I decided to return the stroller, there are still some qualities that I liked about this stroller.\n{'neg': 0.0, 'neu': 0.915, 'pos': 0.085, 'compound': 0.4215}\nHere is what I liked:(1) BACK AND FORWARD FACING.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nI loved that I could face my son towards me when he was an infant and away from me when he gets old enough that he wanted to explore.\n{'neg': 0.0, 'neu': 0.87, 'pos': 0.13, 'compound': 0.5994}\n(2) EASY TO COLLAPSE.\n{'neg': 0.411, 'neu': 0.209, 'pos': 0.38, 'compound': -0.0772}\nAlthough the stroller was bulky, the stroller was still a breeze to collapse.\n{'neg': 0.225, 'neu': 0.775, 'pos': 0.0, 'compound': -0.4939}\n(3) EUROPEAN STYLING.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nIn addition the practicality, I was looking for a European styled stroller.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nThis is the most affordable stroller that is also stylish.After I returned the First Years Indigo Stroller, I went ahead and purchased theBumbleride Flyer Reversible Handle Stroller with 7\" Wheels, Seagrass.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nI will update this review in the near future with a comparison of the two after I get more use of theBumbleride Flyer Reversible Handle Stroller with 7\" Wheels, 
Seagrass.\n{'neg': 0.0, 'neu': 1.0, 'pos': 0.0, 'compound': 0.0}\nIf you can afford to splurge on the Bumbleride Flyer, I would highly recommend it.\n{'neg': 0.0, 'neu': 0.823, 'pos': 0.177, 'compound': 0.4201}\n" ], [ "def sia_features(dataset):\n \"\"\"For each review text in the dataset, extract:\n (1) mean positive sentiment over all sentences\n (2) mean neutral sentiment over all sentences\n (3) mean negative sentiment over all sentences\n (4) maximum positive sentiment over all sentences\n (5) maximum neutral sentiment over all sentences\n (6) maximum negative sentiment over all sentences\n \"\"\"\n feat_matrix = numpy.empty((len(dataset), 6))\n for i in range(len(dataset)):\n sentences = sent_tokenize(dataset[i]['reviewText'])\n nsent = len(sentences)\n if nsent:\n sentence_polarities = numpy.empty((nsent, 3))\n for j in range(nsent):\n polarity = sia.polarity_scores(sentences[j])\n sentence_polarities[j, 0] = polarity['pos']\n sentence_polarities[j, 1] = polarity['neu']\n sentence_polarities[j, 2] = polarity['neg']\n feat_matrix[i, 0:3] = numpy.mean(sentence_polarities, axis = 0) # mean over the columns\n feat_matrix[i, 3:6] = numpy.max(sentence_polarities, axis = 0) # maximum over the columns\n else:\n feat_matrix[i, 0:6] = 0.0\n return feat_matrix \n \nsia_tr = sia_features(baby_train)\nprint(sia_tr[:10])", "[[0.23533333 0.76466667 0. 0.552 1. 0. ]\n [0.0198 0.9678 0.0124 0.099 1. 0.062 ]\n [0.12725 0.8365 0.03625 0.194 0.865 0.145 ]\n [0.1595 0.7775 0.063 0.319 0.874 0.126 ]\n [0.073 0.927 0. 0.219 1. 0. ]\n [0.3115 0.6885 0. 0.571 1. 0. ]\n [0.172 0.8174 0.0106 0.398 1. 0.053 ]\n [0.1035 0.87575 0.02075 0.279 1. 0.096 ]\n [0.29133333 0.70866667 0. 0.406 0.787 0. ]\n [0.106 0.894 0. 0.318 1. 0. ]]\n" ], [ "testmat = numpy.arange(12.).reshape((3,4))\nprint(testmat)\nprint(numpy.max(testmat, axis = 0))\nprint(numpy.mean(testmat, axis = 1))", "[[ 0. 1. 2. 3.]\n [ 4. 5. 6. 7.]\n [ 8. 9. 10. 11.]]\n[ 8. 9. 10. 11.]\n[1.5 5.5 9.5]\n" ], [ "# Homework - required for Certification\n\ndef len_features(dataset):\n \"\"\"Add two features:\n (1) length of review (in thousands of character) - truncate at 2,500\n (2) percentage of exclamation marks (in %)\n \"\"\"\n\nlen_tr = len_features(baby_train) \n ", "_____no_output_____" ], [ "print(X_train_neg.shape, sia_tr.shape)", "(96512, 2) (96512, 6)\n" ], [ "# stack horizontally\nX_train_augmented = numpy.concatenate( (X_train_neg, sia_tr), axis = 1)\nlreg_augmented = LinearRegression().fit(X_train_augmented, Y_train)\npred_train_augmented = lreg_augmented.predict(X_train_augmented)\nmae_train_augmented = mean_absolute_error(pred_train_augmented, Y_train)\nprint(\"Now the mean absolute error on the training data is %f starts\" % mae_train_augmented)", "Now the mean absolute error on the training data is 0.759126 starts\n" ], [ "# random forest\nrf_augmented = RandomForestRegressor().fit(X_train_augmented, Y_train)\nrfpred_train_augmented = rf_augmented.predict(X_train_augmented)\nmae_train_rf_augmented = mean_absolute_error(rfpred_train_augmented, Y_train)\nprint(\"For the RF, MAE is %f stars\" % mae_train_rf_augmented)", "For the RF, MAE is 0.292433 stars\n" ], [ "X_valid_neg = dataset_to_matrix_with_neg(baby_valid)\nsia_valid = sia_features(baby_valid)\n# len_valid = \nX_valid_augmented = numpy.concatenate((X_valid_neg, sia_valid), axis = 1)\npred_valid_augmented =\npred_valid_rfaugmented =\n\nmae_valid_augmented = \nmae_valid_rfaugmented =", "_____no_output_____" ] ], [ [ "# Homework for certification\n\nRefactor the code above:\n- \"Be lazy. 
Not just lazy but proactively, aggressively lazy.\" Remove duplication.\n- create a single function that takes in data and spits out all success metrics across all of your algos.\n", "_____no_output_____" ], [ "# Where to go from here?\n\n\n- unigrams (NLTK)\n- word vector (gensim, [glove](https://nlp.stanford.edu/projects/glove/), word2vec)\n- recurrent neural net\n- convolutional neural net\n\nhttps://www.oreilly.com/learning/perform-sentiment-analysis-with-lstms-using-tensorflow\n\nhttp://www.wildml.com/2015/11/understanding-convolutional-neural-networks-for-nlp/\n\nhttps://machinelearningmastery.com/develop-n-gram-multichannel-convolutional-neural-network-sentiment-analysis/", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ] ]